From af7e6216a6eb9e09e51143bb66691b00e46d3351 Mon Sep 17 00:00:00 2001 From: Jon Christiansen <467023+theJC@users.noreply.github.com> Date: Mon, 15 Jul 2024 22:45:48 -0500 Subject: [PATCH 001/108] Helm chart: Support providing RollingUpdate maxSurge and maxUnavailable values --- helm/chart/router/templates/deployment.yaml | 10 ++++++++++ helm/chart/router/values.yaml | 9 +++++++++ 2 files changed, 19 insertions(+) diff --git a/helm/chart/router/templates/deployment.yaml b/helm/chart/router/templates/deployment.yaml index 6a09cf521b..38a7c6054e 100644 --- a/helm/chart/router/templates/deployment.yaml +++ b/helm/chart/router/templates/deployment.yaml @@ -172,3 +172,13 @@ spec: topologySpreadConstraints: {{- toYaml . | nindent 8 }} {{- end }} + {{- if .Values.rollingUpdate }} + strategy: + rollingUpdate: + {{- if .Values.rollingUpdate.maxUnavailable }} + maxUnavailable: {{ .Values.rollingUpdate.maxUnavailable }} + {{- end }} + {{- if .Values.rollingUpdate.maxSurge }} + maxSurge: {{ .Values.rollingUpdate.maxSurge }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml index 3e67a224f1..01045bd90a 100644 --- a/helm/chart/router/values.yaml +++ b/helm/chart/router/values.yaml @@ -218,6 +218,15 @@ autoscaling: # type: cpu # targetUtilizationPercentage: 75 + +rollingUpdate: + {} +# Adjust rolling update strategy. Can take absolute values or % values. 
+# (https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment) +# Defaults if not set are: +# maxUnavailable: 25% +# maxSurge: 25% + nodeSelector: {} tolerations: [] From 1e6da59b341bb9ff9caddefc9d016ad52cfe59f1 Mon Sep 17 00:00:00 2001 From: Jon Christiansen <467023+theJC@users.noreply.github.com> Date: Mon, 15 Jul 2024 22:47:32 -0500 Subject: [PATCH 002/108] Add changeset --- .changesets/feat_helm_rollingupdate.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .changesets/feat_helm_rollingupdate.md diff --git a/.changesets/feat_helm_rollingupdate.md b/.changesets/feat_helm_rollingupdate.md new file mode 100644 index 0000000000..b59d016fb0 --- /dev/null +++ b/.changesets/feat_helm_rollingupdate.md @@ -0,0 +1,6 @@ +### Support providing RollingUpdate maxSurge and maxUnavailable values ([Issue #5664](https://github.com/apollographql/router/issues/5664)) + +RollingUpdate maxSurge and maxUnavailable are commonly used deployment configuration values. This change makes their +values able to be set via the router helm chart. 
+ +By [Jon Christiansen](https://github.com/theJC) in https://github.com/apollographql/router/pull/5665 From cc769061df22d20bbfc4b5d4fb37bad364bb4ea7 Mon Sep 17 00:00:00 2001 From: Jon Christiansen <467023+theJC@users.noreply.github.com> Date: Mon, 15 Jul 2024 23:45:35 -0500 Subject: [PATCH 003/108] Update condition checking maxSurge and maxUnavailable to hasKey so value 0 will work properly --- helm/chart/router/templates/deployment.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helm/chart/router/templates/deployment.yaml b/helm/chart/router/templates/deployment.yaml index 38a7c6054e..a9c6d37565 100644 --- a/helm/chart/router/templates/deployment.yaml +++ b/helm/chart/router/templates/deployment.yaml @@ -175,10 +175,10 @@ spec: {{- if .Values.rollingUpdate }} strategy: rollingUpdate: - {{- if .Values.rollingUpdate.maxUnavailable }} + {{- if (hasKey .Values.rollingUpdate "maxUnavailable") }} maxUnavailable: {{ .Values.rollingUpdate.maxUnavailable }} {{- end }} - {{- if .Values.rollingUpdate.maxSurge }} + {{- if (hasKey .Values.rollingUpdate "maxSurge") }} maxSurge: {{ .Values.rollingUpdate.maxSurge }} {{- end }} {{- end }} \ No newline at end of file From 2c9b329b3108e475e2641c94dba53e2f05135b7c Mon Sep 17 00:00:00 2001 From: Jon Christiansen <467023+theJC@users.noreply.github.com> Date: Tue, 16 Jul 2024 09:58:02 -0500 Subject: [PATCH 004/108] Update helm/chart/router/templates/deployment.yaml Add suggested change per PR comments Co-authored-by: Nishchal Gautam --- helm/chart/router/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/chart/router/templates/deployment.yaml b/helm/chart/router/templates/deployment.yaml index a9c6d37565..3d46ac978b 100644 --- a/helm/chart/router/templates/deployment.yaml +++ b/helm/chart/router/templates/deployment.yaml @@ -181,4 +181,4 @@ spec: {{- if (hasKey .Values.rollingUpdate "maxSurge") }} maxSurge: {{ .Values.rollingUpdate.maxSurge }} {{- end }} - {{- end 
}} \ No newline at end of file + {{- end }} From 2979e2b6716002ed91478617c67cecb1cb616418 Mon Sep 17 00:00:00 2001 From: Lenny Burdette Date: Wed, 24 Jul 2024 12:58:38 -0400 Subject: [PATCH 005/108] fix: add field arguments to existing types (#5722) --- apollo-federation/src/merge.rs | 55 ++++++++++--------- ...ederation__merge__tests__steel_thread.snap | 2 +- .../merge/connector_Query_users_0.graphql | 2 +- 3 files changed, 30 insertions(+), 29 deletions(-) diff --git a/apollo-federation/src/merge.rs b/apollo-federation/src/merge.rs index 0aed048c60..aaa8af294b 100644 --- a/apollo-federation/src/merge.rs +++ b/apollo-federation/src/merge.rs @@ -128,6 +128,7 @@ impl Merger { needs_inaccessible: false, } } + fn merge(&mut self, subgraphs: ValidFederationSubgraphs) -> Result { let mut subgraphs = subgraphs .into_iter() @@ -534,20 +535,33 @@ impl Merger { ); for arg in field.arguments.iter() { - let arguments = &mut supergraph_field.make_mut().arguments; - if let Some(index) = arguments.iter().position(|a| a.name == arg.name) { - if let Some(existing_arg) = arguments.get_mut(index) { - // TODO add args - let mutable_arg = existing_arg.make_mut(); - self.add_inaccessible( - directive_names, - &mut mutable_arg.directives, - &arg.directives, - ); - } else { - // TODO mismatch no args - } - } + let arguments_to_merge = &mut supergraph_field.make_mut().arguments; + let argument_to_merge = arguments_to_merge + .iter_mut() + .find_map(|a| (a.name == arg.name).then(|| a.make_mut())); + + if let Some(argument) = argument_to_merge { + self.add_inaccessible( + directive_names, + &mut argument.directives, + &arg.directives, + ); + } else { + let mut argument = InputValueDefinition { + name: arg.name.clone(), + description: arg.description.clone(), + directives: Default::default(), + ty: arg.ty.clone(), + default_value: arg.default_value.clone(), + }; + + self.add_inaccessible( + directive_names, + &mut argument.directives, + &arg.directives, + ); + 
arguments_to_merge.push(argument.into()); + }; } let requires_directive_option = field @@ -704,19 +718,6 @@ impl Merger { } } -fn filter_directives<'a, D, I, O>(deny_list: &IndexSet, directives: D) -> O -where - D: IntoIterator, - I: 'a + AsRef + Clone, - O: FromIterator, -{ - directives - .into_iter() - .filter(|d| !deny_list.contains(&d.as_ref().name)) - .cloned() - .collect() -} - struct DirectiveNames { key: Name, requires: Name, diff --git a/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap b/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap index 8ace8b2b86..54ee822eea 100644 --- a/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap +++ b/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap @@ -52,6 +52,6 @@ type User @join__type(graph: CONNECTOR_QUERY_USER_0, key: "id") @join__type(grap type Query @join__type(graph: CONNECTOR_QUERY_USER_0) @join__type(graph: CONNECTOR_QUERY_USERS_0) @join__type(graph: CONNECTOR_USER_D_1) @join__type(graph: GRAPHQL) { user(id: ID!): User @join__field(graph: CONNECTOR_QUERY_USER_0) - users: [User] @join__field(graph: CONNECTOR_QUERY_USERS_0) + users(limit: Int): [User] @join__field(graph: CONNECTOR_QUERY_USERS_0) _: ID @inaccessible @join__field(graph: CONNECTOR_USER_D_1) } diff --git a/apollo-federation/src/sources/connect/expand/merge/connector_Query_users_0.graphql b/apollo-federation/src/sources/connect/expand/merge/connector_Query_users_0.graphql index 13fb5dea4a..414b83db47 100644 --- a/apollo-federation/src/sources/connect/expand/merge/connector_Query_users_0.graphql +++ b/apollo-federation/src/sources/connect/expand/merge/connector_Query_users_0.graphql @@ -73,5 +73,5 @@ type User { } type Query { - users: [User] + users(limit: Int): [User] } From 4f9129988fe573206c326bf2d3f26fdf04d612b0 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Thu, 25 Jul 2024 11:42:55 +0200 Subject: [PATCH 
006/108] perf: don't re-create meter and instruments on every call (#5629) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- ...int_bnjjj_improve_perf_custom_telemetry.md | 5 + .../plugins/telemetry/config_new/cache/mod.rs | 54 +- .../plugins/telemetry/config_new/cost/mod.rs | 53 +- .../graphql/field.execution/metrics.snap | 1 + .../graphql/field.length/metrics.snap | 1 + .../telemetry/config_new/graphql/mod.rs | 80 +- .../telemetry/config_new/instruments.rs | 759 +++++++++++++++--- apollo-router/src/plugins/telemetry/mod.rs | 75 +- 8 files changed, 785 insertions(+), 243 deletions(-) create mode 100644 .changesets/maint_bnjjj_improve_perf_custom_telemetry.md diff --git a/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md b/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md new file mode 100644 index 0000000000..cc27c908d5 --- /dev/null +++ b/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md @@ -0,0 +1,5 @@ +### Improve performance, don't re-create meter and instruments on every call in Telemetry ([PR #5629](https://github.com/apollographql/router/pull/5629)) + +The creation of otel instruments using a regex is no longer part of the hot path. Now we create these instruments when starting the telemetry plugin and not in every service. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5629 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs b/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs index f1d01f2393..adc172911b 100644 --- a/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs @@ -1,29 +1,19 @@ use std::sync::Arc; use attributes::CacheAttributes; -use opentelemetry::metrics::MeterProvider; -use opentelemetry::metrics::Unit; use opentelemetry::Key; use opentelemetry::KeyValue; -use parking_lot::Mutex; use schemars::JsonSchema; use serde::Deserialize; use tower::BoxError; use super::instruments::CustomCounter; -use super::instruments::CustomCounterInner; -use super::instruments::Increment; -use super::instruments::InstrumentsConfig; -use super::instruments::METER_NAME; -use super::selectors::CacheKind; use super::selectors::SubgraphSelector; -use crate::metrics; use crate::plugins::cache::entity::CacheHitMiss; use crate::plugins::cache::entity::CacheSubgraph; use crate::plugins::cache::metrics::CacheMetricContextKey; use crate::plugins::telemetry::config::AttributeValue; use crate::plugins::telemetry::config_new::attributes::DefaultAttributeRequirementLevel; -use crate::plugins::telemetry::config_new::conditions::Condition; use crate::plugins::telemetry::config_new::extendable::Extendable; use crate::plugins::telemetry::config_new::instruments::DefaultedStandardInstrument; use crate::plugins::telemetry::config_new::instruments::Instrumented; @@ -33,7 +23,7 @@ use crate::services::subgraph; pub(crate) mod attributes; -static CACHE_METRIC: &str = "apollo.router.operations.entity.cache"; +pub(crate) const CACHE_METRIC: &str = "apollo.router.operations.entity.cache"; const ENTITY_TYPE: Key = Key::from_static_str("entity.type"); const CACHE_HIT: Key = Key::from_static_str("cache.hit"); @@ -63,48 +53,6 @@ pub(crate) struct 
CacheInstruments { >, } -impl From<&InstrumentsConfig> for CacheInstruments { - fn from(value: &InstrumentsConfig) -> Self { - let meter = metrics::meter_provider().meter(METER_NAME); - CacheInstruments { - cache_hit: value.cache.attributes.cache.is_enabled().then(|| { - let mut nb_attributes = 0; - let selectors = match &value.cache.attributes.cache { - DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => { - None - } - DefaultedStandardInstrument::Extendable { attributes } => { - nb_attributes = attributes.custom.len(); - Some(attributes.clone()) - } - }; - CustomCounter { - inner: Mutex::new(CustomCounterInner { - increment: Increment::Custom(None), - condition: Condition::True, - counter: Some( - meter - .f64_counter(CACHE_METRIC) - .with_unit(Unit::new("ops")) - .with_description( - "Entity cache hit/miss operations at the subgraph level", - ) - .init(), - ), - attributes: Vec::with_capacity(nb_attributes), - selector: Some(Arc::new(SubgraphSelector::Cache { - cache: CacheKind::Hit, - entity_type: None, - })), - selectors, - incremented: false, - }), - } - }), - } - } -} - impl Instrumented for CacheInstruments { type Request = subgraph::Request; type Response = subgraph::Response; diff --git a/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs b/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs index df3dcf7b0d..503b191904 100644 --- a/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::sync::Arc; use opentelemetry::metrics::MeterProvider; @@ -9,6 +10,7 @@ use serde::Deserialize; use tower::BoxError; use super::instruments::Increment; +use super::instruments::StaticInstrument; use crate::metrics; use crate::plugins::demand_control::CostContext; use crate::plugins::telemetry::config::AttributeValue; @@ -115,7 +117,28 @@ pub(crate) struct CostInstrumentsConfig { } impl CostInstrumentsConfig { - 
pub(crate) fn to_instruments(&self) -> CostInstruments { + pub(crate) fn new_static_instruments(&self) -> HashMap { + let meter = metrics::meter_provider() + .meter(crate::plugins::telemetry::config_new::instruments::METER_NAME); + + [( + COST_ESTIMATED.to_string(), + StaticInstrument::Histogram(meter.f64_histogram(COST_ESTIMATED).with_description("Estimated cost of the operation using the currently configured cost model").init()), + ),( + COST_ACTUAL.to_string(), + StaticInstrument::Histogram(meter.f64_histogram(COST_ACTUAL).with_description("Actual cost of the operation using the currently configured cost model").init()), + ),( + COST_DELTA.to_string(), + StaticInstrument::Histogram(meter.f64_histogram(COST_DELTA).with_description("Delta between the estimated and actual cost of the operation using the currently configured cost model").init()), + )] + .into_iter() + .collect() + } + + pub(crate) fn to_instruments( + &self, + static_instruments: Arc>, + ) -> CostInstruments { let cost_estimated = self.cost_estimated.is_enabled().then(|| { Self::histogram( COST_ESTIMATED, @@ -123,6 +146,7 @@ impl CostInstrumentsConfig { SupergraphSelector::Cost { cost: CostValue::Estimated, }, + &static_instruments, ) }); @@ -133,6 +157,7 @@ impl CostInstrumentsConfig { SupergraphSelector::Cost { cost: CostValue::Actual, }, + &static_instruments, ) }); @@ -143,6 +168,7 @@ impl CostInstrumentsConfig { SupergraphSelector::Cost { cost: CostValue::Delta, }, + &static_instruments, ) }); CostInstruments { @@ -156,9 +182,8 @@ impl CostInstrumentsConfig { name: &'static str, config: &DefaultedStandardInstrument>, selector: SupergraphSelector, + static_instruments: &Arc>, ) -> CustomHistogram { - let meter = metrics::meter_provider() - .meter(crate::plugins::telemetry::config_new::instruments::METER_NAME); let mut nb_attributes = 0; let selectors = match config { DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => None, @@ -172,7 +197,13 @@ impl CostInstrumentsConfig 
{ inner: Mutex::new(CustomHistogramInner { increment: Increment::EventCustom(None), condition: Condition::True, - histogram: Some(meter.f64_histogram(name).init()), + histogram: Some( + static_instruments + .get(name) + .expect("cannot get static instrument for cost; this should not happen") + .as_histogram() + .expect("cannot convert instrument to histogram for cost; this should not happen").clone(), + ), attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(selector)), selectors, @@ -307,6 +338,8 @@ pub(crate) fn add_cost_attributes(context: &Context, custom_attributes: &mut Vec #[cfg(test)] mod test { + use std::sync::Arc; + use crate::context::OPERATION_NAME; use crate::plugins::demand_control::CostContext; use crate::plugins::telemetry::config_new::cost::CostInstruments; @@ -318,7 +351,7 @@ mod test { #[test] fn test_default_estimated() { let config = config(include_str!("fixtures/cost_estimated.router.yaml")); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.estimated", 100.0); @@ -330,7 +363,7 @@ mod test { #[test] fn test_default_actual() { let config = config(include_str!("fixtures/cost_actual.router.yaml")); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.actual", 10.0); @@ -342,7 +375,7 @@ mod test { #[test] fn test_default_delta() { let config = config(include_str!("fixtures/cost_delta.router.yaml")); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.delta", 90.0); @@ -356,7 +389,7 @@ mod test { let config = config(include_str!( "fixtures/cost_estimated_with_attributes.router.yaml" )); - let instruments = config.to_instruments(); 
+ let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.estimated", 100.0, cost.result = "COST_TOO_EXPENSIVE"); @@ -370,7 +403,7 @@ mod test { let config = config(include_str!( "fixtures/cost_actual_with_attributes.router.yaml" )); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.actual", 10.0, cost.result = "COST_TOO_EXPENSIVE"); @@ -384,7 +417,7 @@ mod test { let config = config(include_str!( "fixtures/cost_delta_with_attributes.router.yaml" )); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!( diff --git a/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.execution/metrics.snap b/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.execution/metrics.snap index d1214d2f8f..0b3e04d9ef 100644 --- a/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.execution/metrics.snap +++ b/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.execution/metrics.snap @@ -10,6 +10,7 @@ info: field.execution: true --- - name: graphql.field.execution + description: Number of times a field is used. 
data: datapoints: - value: 1 diff --git a/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.length/metrics.snap b/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.length/metrics.snap index cce830a861..d3e0270014 100644 --- a/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.length/metrics.snap +++ b/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.length/metrics.snap @@ -11,6 +11,7 @@ info: list.length: true --- - name: graphql.field.list.length + description: Length of a selected field in the GraphQL response data: datapoints: - sum: 3 diff --git a/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs b/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs index 1178f0a102..7e68446e3d 100644 --- a/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs @@ -1,32 +1,20 @@ -use std::sync::Arc; - use apollo_compiler::ast::NamedType; use apollo_compiler::executable::Field; use apollo_compiler::ExecutableDocument; -use opentelemetry::metrics::MeterProvider; -use parking_lot::Mutex; use schemars::JsonSchema; use serde::Deserialize; use serde_json_bytes::Value; use tower::BoxError; use super::instruments::CustomCounter; -use super::instruments::CustomCounterInner; use super::instruments::CustomInstruments; -use super::instruments::Increment; -use super::instruments::InstrumentsConfig; -use super::instruments::METER_NAME; use crate::graphql::ResponseVisitor; -use crate::metrics; use crate::plugins::telemetry::config_new::attributes::DefaultAttributeRequirementLevel; -use crate::plugins::telemetry::config_new::conditions::Condition; use crate::plugins::telemetry::config_new::extendable::Extendable; use crate::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes; use crate::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector; use 
crate::plugins::telemetry::config_new::graphql::selectors::GraphQLValue; -use crate::plugins::telemetry::config_new::graphql::selectors::ListLength; use crate::plugins::telemetry::config_new::instruments::CustomHistogram; -use crate::plugins::telemetry::config_new::instruments::CustomHistogramInner; use crate::plugins::telemetry::config_new::instruments::DefaultedStandardInstrument; use crate::plugins::telemetry::config_new::instruments::Instrumented; use crate::plugins::telemetry::config_new::DefaultForLevel; @@ -37,8 +25,8 @@ use crate::Context; pub(crate) mod attributes; pub(crate) mod selectors; -static FIELD_LENGTH: &str = "graphql.field.list.length"; -static FIELD_EXECUTION: &str = "graphql.field.execution"; +pub(crate) const FIELD_LENGTH: &str = "graphql.field.list.length"; +pub(crate) const FIELD_EXECUTION: &str = "graphql.field.execution"; #[derive(Deserialize, JsonSchema, Clone, Default, Debug)] #[serde(deny_unknown_fields, default)] @@ -98,67 +86,6 @@ pub(crate) struct GraphQLInstruments { pub(crate) custom: GraphQLCustomInstruments, } -impl From<&InstrumentsConfig> for GraphQLInstruments { - fn from(value: &InstrumentsConfig) -> Self { - let meter = metrics::meter_provider().meter(METER_NAME); - GraphQLInstruments { - list_length: value.graphql.attributes.list_length.is_enabled().then(|| { - let mut nb_attributes = 0; - let selectors = match &value.graphql.attributes.list_length { - DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => { - None - } - DefaultedStandardInstrument::Extendable { attributes } => { - nb_attributes = attributes.custom.len(); - Some(attributes.clone()) - } - }; - CustomHistogram { - inner: Mutex::new(CustomHistogramInner { - increment: Increment::FieldCustom(None), - condition: Condition::True, - histogram: Some(meter.f64_histogram(FIELD_LENGTH).init()), - attributes: Vec::with_capacity(nb_attributes), - selector: Some(Arc::new(GraphQLSelector::ListLength { - list_length: ListLength::Value, - })), - 
selectors, - updated: false, - }), - } - }), - field_execution: value - .graphql - .attributes - .field_execution - .is_enabled() - .then(|| { - let mut nb_attributes = 0; - let selectors = match &value.graphql.attributes.field_execution { - DefaultedStandardInstrument::Bool(_) - | DefaultedStandardInstrument::Unset => None, - DefaultedStandardInstrument::Extendable { attributes } => { - nb_attributes = attributes.custom.len(); - Some(attributes.clone()) - } - }; - CustomCounter { - inner: Mutex::new(CustomCounterInner { - increment: Increment::FieldUnit, - condition: Condition::True, - counter: Some(meter.f64_counter(FIELD_EXECUTION).init()), - attributes: Vec::with_capacity(nb_attributes), - selector: None, - selectors, - incremented: false, - }), - } - }), - custom: CustomInstruments::new(&value.graphql.custom), - } - } -} - impl Instrumented for GraphQLInstruments { type Request = supergraph::Request; type Response = supergraph::Response; @@ -327,12 +254,11 @@ pub(crate) mod test { .build() .unwrap(); - let harness = PluginTestHarness::::builder() + let harness: PluginTestHarness = PluginTestHarness::::builder() .config(include_str!("fixtures/field_length_enabled.router.yaml")) .schema(schema_str) .build() .await; - harness .call_supergraph(request, |req| { let response: serde_json::Value = serde_json::from_str(include_str!( diff --git a/apollo-router/src/plugins/telemetry/config_new/instruments.rs b/apollo-router/src/plugins/telemetry/config_new/instruments.rs index d9b758f42d..3b112a6f2d 100644 --- a/apollo-router/src/plugins/telemetry/config_new/instruments.rs +++ b/apollo-router/src/plugins/telemetry/config_new/instruments.rs @@ -22,7 +22,14 @@ use tower::BoxError; use super::attributes::HttpServerAttributes; use super::cache::attributes::CacheAttributes; +use super::cache::CacheInstruments; use super::cache::CacheInstrumentsConfig; +use super::cache::CACHE_METRIC; +use super::graphql::selectors::ListLength; +use super::graphql::GraphQLInstruments; +use 
super::graphql::FIELD_EXECUTION; +use super::graphql::FIELD_LENGTH; +use super::selectors::CacheKind; use super::DefaultForLevel; use super::Selector; use crate::metrics; @@ -86,6 +93,15 @@ pub(crate) struct InstrumentsConfig { >, } +const HTTP_SERVER_REQUEST_DURATION_METRIC: &str = "http.server.request.duration"; +const HTTP_SERVER_REQUEST_BODY_SIZE_METRIC: &str = "http.server.request.body.size"; +const HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC: &str = "http.server.response.body.size"; +const HTTP_SERVER_ACTIVE_REQUESTS: &str = "http.server.active_requests"; + +const HTTP_CLIENT_REQUEST_DURATION_METRIC: &str = "http.client.request.duration"; +const HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC: &str = "http.client.request.body.size"; +const HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC: &str = "http.client.response.body.size"; + impl InstrumentsConfig { /// Update the defaults for spans configuration regarding the `default_attribute_requirement_level` pub(crate) fn update_defaults(&mut self) { @@ -100,8 +116,118 @@ impl InstrumentsConfig { .defaults_for_levels(self.default_requirement_level, TelemetryDataKind::Metrics); } - pub(crate) fn new_router_instruments(&self) -> RouterInstruments { + pub(crate) fn new_builtin_router_instruments(&self) -> HashMap { let meter = metrics::meter_provider().meter(METER_NAME); + let mut static_instruments = HashMap::with_capacity(self.router.custom.len()); + + if self + .router + .attributes + .http_server_request_duration + .is_enabled() + { + static_instruments.insert( + HTTP_SERVER_REQUEST_DURATION_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_SERVER_REQUEST_DURATION_METRIC) + .with_unit(Unit::new("s")) + .with_description("Duration of HTTP server requests.") + .init(), + ), + ); + } + + if self + .router + .attributes + .http_server_request_body_size + .is_enabled() + { + static_instruments.insert( + HTTP_SERVER_REQUEST_BODY_SIZE_METRIC.to_string(), + StaticInstrument::Histogram( + meter + 
.f64_histogram(HTTP_SERVER_REQUEST_BODY_SIZE_METRIC) + .with_unit(Unit::new("By")) + .with_description("Size of HTTP server request bodies.") + .init(), + ), + ); + } + + if self + .router + .attributes + .http_server_response_body_size + .is_enabled() + { + static_instruments.insert( + HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC) + .with_unit(Unit::new("By")) + .with_description("Size of HTTP server response bodies.") + .init(), + ), + ); + } + + if self + .router + .attributes + .http_server_active_requests + .is_enabled() + { + static_instruments.insert( + HTTP_SERVER_ACTIVE_REQUESTS.to_string(), + StaticInstrument::UpDownCounterI64( + meter + .i64_up_down_counter(HTTP_SERVER_ACTIVE_REQUESTS) + .with_unit(Unit::new("request")) + .with_description("Number of active HTTP server requests.") + .init(), + ), + ); + } + + for (instrument_name, instrument) in &self.router.custom { + match instrument.ty { + InstrumentType::Counter => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::CounterF64( + meter + .f64_counter(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + InstrumentType::Histogram => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::Histogram( + meter + .f64_histogram(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + } + } + + static_instruments + } + + pub(crate) fn new_router_instruments( + &self, + static_instruments: Arc>, + ) -> RouterInstruments { let http_server_request_duration = self .router .attributes @@ -112,11 +238,16 @@ impl InstrumentsConfig { increment: Increment::Duration(Instant::now()), condition: Condition::True, histogram: Some( - meter - 
.f64_histogram("http.server.request.duration") - .with_unit(Unit::new("s")) - .with_description("Duration of HTTP server requests.") - .init(), + static_instruments + .get(HTTP_SERVER_REQUEST_DURATION_METRIC) + .expect( + "cannot get static instrument for router; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for router; this should not happen", + ), ), attributes: Vec::new(), selector: None, @@ -150,11 +281,15 @@ impl InstrumentsConfig { increment: Increment::Custom(None), condition: Condition::True, histogram: Some( - meter - .f64_histogram("http.server.request.body.size") - .with_unit(Unit::new("By")) - .with_description("Size of HTTP server request bodies.") - .init(), + static_instruments + .get(HTTP_SERVER_REQUEST_BODY_SIZE_METRIC) + .expect( + "cannot get static instrument for router; this should not happen", + ) + .as_histogram() + .cloned().expect( + "cannot convert instrument to histogram for router; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(RouterSelector::RequestHeader { @@ -188,11 +323,16 @@ impl InstrumentsConfig { increment: Increment::Custom(None), condition: Condition::True, histogram: Some( - meter - .f64_histogram("http.server.response.body.size") - .with_unit(Unit::new("By")) - .with_description("Size of HTTP server response bodies.") - .init(), + static_instruments + .get(HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC) + .expect( + "cannot get static instrument for router; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for router; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(RouterSelector::ResponseHeader { @@ -213,11 +353,16 @@ impl InstrumentsConfig { .then(|| ActiveRequestsCounter { inner: Mutex::new(ActiveRequestsCounterInner { counter: Some( - meter - .i64_up_down_counter("http.server.active_requests") - 
.with_unit(Unit::new("request")) - .with_description("Number of active HTTP server requests.") - .init(), + static_instruments + .get(HTTP_SERVER_ACTIVE_REQUESTS) + .expect( + "cannot get static instrument for router; this should not happen", + ) + .as_up_down_counter_i64() + .cloned() + .expect( + "cannot convert instrument to up and down counter for router; this should not happen", + ), ), attrs_config: match &self.router.attributes.http_server_active_requests { DefaultedStandardInstrument::Bool(_) @@ -234,19 +379,155 @@ impl InstrumentsConfig { http_server_request_body_size, http_server_response_body_size, http_server_active_requests, - custom: CustomInstruments::new(&self.router.custom), + custom: CustomInstruments::new(&self.router.custom, static_instruments), + } + } + + pub(crate) fn new_builtin_supergraph_instruments(&self) -> HashMap { + let meter = metrics::meter_provider().meter(METER_NAME); + + let mut static_instruments = HashMap::with_capacity(self.supergraph.custom.len()); + for (instrument_name, instrument) in &self.supergraph.custom { + match instrument.ty { + InstrumentType::Counter => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::CounterF64( + meter + .f64_counter(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + InstrumentType::Histogram => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::Histogram( + meter + .f64_histogram(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + } } + static_instruments.extend(self.supergraph.attributes.cost.new_static_instruments()); + + static_instruments } - pub(crate) fn new_supergraph_instruments(&self) -> SupergraphInstruments { + pub(crate) fn new_supergraph_instruments( + &self, + static_instruments: Arc>, + ) -> SupergraphInstruments { 
SupergraphInstruments { - cost: self.supergraph.attributes.cost.to_instruments(), - custom: CustomInstruments::new(&self.supergraph.custom), + cost: self + .supergraph + .attributes + .cost + .to_instruments(static_instruments.clone()), + custom: CustomInstruments::new(&self.supergraph.custom, static_instruments), } } - pub(crate) fn new_subgraph_instruments(&self) -> SubgraphInstruments { + pub(crate) fn new_builtin_subgraph_instruments(&self) -> HashMap { let meter = metrics::meter_provider().meter(METER_NAME); + let mut static_instruments = HashMap::with_capacity(self.subgraph.custom.len()); + + if self + .subgraph + .attributes + .http_client_request_duration + .is_enabled() + { + static_instruments.insert( + HTTP_CLIENT_REQUEST_DURATION_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_CLIENT_REQUEST_DURATION_METRIC) + .with_unit(Unit::new("s")) + .with_description("Duration of HTTP client requests.") + .init(), + ), + ); + } + + if self + .subgraph + .attributes + .http_client_request_body_size + .is_enabled() + { + static_instruments.insert( + HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC) + .with_unit(Unit::new("By")) + .with_description("Size of HTTP client request bodies.") + .init(), + ), + ); + } + + if self + .subgraph + .attributes + .http_client_response_body_size + .is_enabled() + { + static_instruments.insert( + HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC) + .with_unit(Unit::new("By")) + .with_description("Size of HTTP client response bodies.") + .init(), + ), + ); + } + + for (instrument_name, instrument) in &self.subgraph.custom { + match instrument.ty { + InstrumentType::Counter => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::CounterF64( + meter + .f64_counter(instrument_name.clone()) + 
.with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + InstrumentType::Histogram => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::Histogram( + meter + .f64_histogram(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + } + } + + static_instruments + } + + pub(crate) fn new_subgraph_instruments( + &self, + static_instruments: Arc>, + ) -> SubgraphInstruments { let http_client_request_duration = self.subgraph .attributes @@ -266,12 +547,16 @@ impl InstrumentsConfig { inner: Mutex::new(CustomHistogramInner { increment: Increment::Duration(Instant::now()), condition: Condition::True, - histogram: Some( - meter - .f64_histogram("http.client.request.duration") - .with_unit(Unit::new("s")) - .with_description("Duration of HTTP client requests.") - .init(), + histogram: Some(static_instruments + .get(HTTP_CLIENT_REQUEST_DURATION_METRIC) + .expect( + "cannot get static instrument for subgraph; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for subgraph; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), selector: None, @@ -299,12 +584,16 @@ impl InstrumentsConfig { inner: Mutex::new(CustomHistogramInner { increment: Increment::Custom(None), condition: Condition::True, - histogram: Some( - meter - .f64_histogram("http.client.request.body.size") - .with_unit(Unit::new("By")) - .with_description("Size of HTTP client request bodies.") - .init(), + histogram: Some(static_instruments + .get(HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC) + .expect( + "cannot get static instrument for subgraph; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for subgraph; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), 
selector: Some(Arc::new(SubgraphSelector::SubgraphRequestHeader { @@ -336,12 +625,16 @@ impl InstrumentsConfig { inner: Mutex::new(CustomHistogramInner { increment: Increment::Custom(None), condition: Condition::True, - histogram: Some( - meter - .f64_histogram("http.client.response.body.size") - .with_unit(Unit::new("By")) - .with_description("Size of HTTP client response bodies.") - .init(), + histogram: Some(static_instruments + .get(HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC) + .expect( + "cannot get static instrument for subgraph; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for subgraph; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(SubgraphSelector::SubgraphResponseHeader { @@ -358,7 +651,243 @@ impl InstrumentsConfig { http_client_request_duration, http_client_request_body_size, http_client_response_body_size, - custom: CustomInstruments::new(&self.subgraph.custom), + custom: CustomInstruments::new(&self.subgraph.custom, static_instruments), + } + } + + pub(crate) fn new_builtin_graphql_instruments(&self) -> HashMap { + let meter = metrics::meter_provider().meter(METER_NAME); + let mut static_instruments = HashMap::with_capacity(self.graphql.custom.len()); + if self.graphql.attributes.list_length.is_enabled() { + static_instruments.insert( + FIELD_LENGTH.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(FIELD_LENGTH) + .with_description("Length of a selected field in the GraphQL response") + .init(), + ), + ); + } + + if self.graphql.attributes.field_execution.is_enabled() { + static_instruments.insert( + FIELD_EXECUTION.to_string(), + StaticInstrument::CounterF64( + meter + .f64_counter(FIELD_EXECUTION) + .with_description("Number of times a field is used.") + .init(), + ), + ); + } + + for (instrument_name, instrument) in &self.graphql.custom { + match instrument.ty { + InstrumentType::Counter => { + 
static_instruments.insert( + instrument_name.clone(), + StaticInstrument::CounterF64( + meter + .f64_counter(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + InstrumentType::Histogram => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::Histogram( + meter + .f64_histogram(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + } + } + + static_instruments + } + + pub(crate) fn new_graphql_instruments( + &self, + static_instruments: Arc>, + ) -> GraphQLInstruments { + GraphQLInstruments { + list_length: self.graphql.attributes.list_length.is_enabled().then(|| { + let mut nb_attributes = 0; + let selectors = match &self.graphql.attributes.list_length { + DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => { + None + } + DefaultedStandardInstrument::Extendable { attributes } => { + nb_attributes = attributes.custom.len(); + Some(attributes.clone()) + } + }; + CustomHistogram { + inner: Mutex::new(CustomHistogramInner { + increment: Increment::FieldCustom(None), + condition: Condition::True, + histogram: Some(static_instruments + .get(FIELD_LENGTH) + .expect( + "cannot get static instrument for graphql; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to counter for graphql; this should not happen", + ) + ), + attributes: Vec::with_capacity(nb_attributes), + selector: Some(Arc::new(GraphQLSelector::ListLength { + list_length: ListLength::Value, + })), + selectors, + updated: false, + }), + } + }), + field_execution: self + .graphql + .attributes + .field_execution + .is_enabled() + .then(|| { + let mut nb_attributes = 0; + let selectors = match &self.graphql.attributes.field_execution { + DefaultedStandardInstrument::Bool(_) + | DefaultedStandardInstrument::Unset => 
None, + DefaultedStandardInstrument::Extendable { attributes } => { + nb_attributes = attributes.custom.len(); + Some(attributes.clone()) + } + }; + CustomCounter { + inner: Mutex::new(CustomCounterInner { + increment: Increment::FieldUnit, + condition: Condition::True, + counter: Some(static_instruments + .get(FIELD_EXECUTION) + .expect( + "cannot get static instrument for graphql; this should not happen", + ) + .as_counter_f64() + .cloned() + .expect( + "cannot convert instrument to counter for graphql; this should not happen", + ) + ), + attributes: Vec::with_capacity(nb_attributes), + selector: None, + selectors, + incremented: false, + }), + } + }), + custom: CustomInstruments::new(&self.graphql.custom, static_instruments), + } + } + + pub(crate) fn new_builtin_cache_instruments(&self) -> HashMap { + let meter = metrics::meter_provider().meter(METER_NAME); + let mut static_instruments: HashMap = HashMap::new(); + if self.cache.attributes.cache.is_enabled() { + static_instruments.insert( + CACHE_METRIC.to_string(), + StaticInstrument::CounterF64( + meter + .f64_counter(CACHE_METRIC) + .with_unit(Unit::new("ops")) + .with_description("Entity cache hit/miss operations at the subgraph level") + .init(), + ), + ); + } + + static_instruments + } + + pub(crate) fn new_cache_instruments( + &self, + static_instruments: Arc>, + ) -> CacheInstruments { + CacheInstruments { + cache_hit: self.cache.attributes.cache.is_enabled().then(|| { + let mut nb_attributes = 0; + let selectors = match &self.cache.attributes.cache { + DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => { + None + } + DefaultedStandardInstrument::Extendable { attributes } => { + nb_attributes = attributes.custom.len(); + Some(attributes.clone()) + } + }; + CustomCounter { + inner: Mutex::new(CustomCounterInner { + increment: Increment::Custom(None), + condition: Condition::True, + counter: Some(static_instruments + .get(CACHE_METRIC) + .expect( + "cannot get static instrument 
for cache; this should not happen", + ) + .as_counter_f64() + .cloned() + .expect( + "cannot convert instrument to counter for cache; this should not happen", + ) + ), + attributes: Vec::with_capacity(nb_attributes), + selector: Some(Arc::new(SubgraphSelector::Cache { + cache: CacheKind::Hit, + entity_type: None, + })), + selectors, + incremented: false, + }), + } + }), + } + } +} + +#[derive(Debug)] +pub(crate) enum StaticInstrument { + CounterF64(Counter), + UpDownCounterI64(UpDownCounter), + Histogram(Histogram), +} + +impl StaticInstrument { + pub(crate) fn as_counter_f64(&self) -> Option<&Counter> { + if let Self::CounterF64(v) = self { + Some(v) + } else { + None + } + } + + pub(crate) fn as_up_down_counter_i64(&self) -> Option<&UpDownCounter> { + if let Self::UpDownCounterI64(v) = self { + Some(v) + } else { + None + } + } + + pub(crate) fn as_histogram(&self) -> Option<&Histogram> { + if let Self::Histogram(v) = self { + Some(v) + } else { + None } } } @@ -831,10 +1360,10 @@ where { pub(crate) fn new( config: &HashMap>, + static_instruments: Arc>, ) -> Self { let mut counters = Vec::new(); let mut histograms = Vec::new(); - let meter = metrics::meter_provider().meter(METER_NAME); for (instrument_name, instrument) in config { match instrument.ty { @@ -864,25 +1393,32 @@ where } }, }; - let counter = CustomCounterInner { - increment, - condition: instrument.condition.clone(), - counter: Some( - meter - .f64_counter(instrument_name.clone()) - .with_description(instrument.description.clone()) - .with_unit(Unit::new(instrument.unit.clone())) - .init(), - ), - attributes: Vec::new(), - selector, - selectors: Some(instrument.attributes.clone()), - incremented: false, - }; - - counters.push(CustomCounter { - inner: Mutex::new(counter), - }) + match static_instruments + .get(instrument_name) + .expect( + "cannot get static instrument for supergraph; this should not happen", + ) + .as_counter_f64() + .cloned() + { + Some(counter) => { + let counter = 
CustomCounterInner { + increment, + condition: instrument.condition.clone(), + counter: Some(counter), + attributes: Vec::new(), + selector, + selectors: Some(instrument.attributes.clone()), + incremented: false, + }; + counters.push(CustomCounter { + inner: Mutex::new(counter), + }) + } + None => { + ::tracing::error!("cannot convert static instrument into a counter, this is an error; please fill an issue on GitHub"); + } + } } InstrumentType::Histogram => { let (selector, increment) = match (&instrument.value).into() { @@ -910,25 +1446,34 @@ where } }, }; - let histogram = CustomHistogramInner { - increment, - condition: instrument.condition.clone(), - histogram: Some( - meter - .f64_histogram(instrument_name.clone()) - .with_description(instrument.description.clone()) - .with_unit(Unit::new(instrument.unit.clone())) - .init(), - ), - attributes: Vec::new(), - selector, - selectors: Some(instrument.attributes.clone()), - updated: false, - }; - histograms.push(CustomHistogram { - inner: Mutex::new(histogram), - }) + match static_instruments + .get(instrument_name) + .expect( + "cannot get static instrument for supergraph; this should not happen", + ) + .as_histogram() + .cloned() + { + Some(histogram) => { + let histogram = CustomHistogramInner { + increment, + condition: instrument.condition.clone(), + histogram: Some(histogram), + attributes: Vec::new(), + selector, + selectors: Some(instrument.attributes.clone()), + updated: false, + }; + + histograms.push(CustomHistogram { + inner: Mutex::new(histogram), + }); + } + None => { + ::tracing::error!("cannot convert static instrument into a histogram, this is an error; please fill an issue on GitHub"); + } + } } } } @@ -2298,7 +2843,10 @@ mod tests { let mut supergraph_instruments = None; let mut subgraph_instruments = None; let mut cache_instruments: Option = None; - let graphql_instruments: GraphQLInstruments = (&config).into(); + let graphql_instruments: GraphQLInstruments = config + 
.new_graphql_instruments(Arc::new( + config.new_builtin_graphql_instruments(), + )); let context = Context::new(); for event in request { match event { @@ -2316,7 +2864,9 @@ mod tests { .body(body) .build() .unwrap(); - router_instruments = Some(config.new_router_instruments()); + router_instruments = Some(config.new_router_instruments( + Arc::new(config.new_builtin_router_instruments()), + )); router_instruments .as_mut() .expect("router instruments") @@ -2352,7 +2902,9 @@ mod tests { headers, } => { supergraph_instruments = - Some(config.new_supergraph_instruments()); + Some(config.new_supergraph_instruments(Arc::new( + config.new_builtin_supergraph_instruments(), + ))); let mut request = supergraph::Request::fake_builder() .context(context.clone()) @@ -2404,8 +2956,12 @@ mod tests { extensions, headers, } => { - subgraph_instruments = Some(config.new_subgraph_instruments()); - cache_instruments = Some((&config).into()); + subgraph_instruments = Some(config.new_subgraph_instruments( + Arc::new(config.new_builtin_subgraph_instruments()), + )); + cache_instruments = Some(config.new_cache_instruments( + Arc::new(config.new_builtin_cache_instruments()), + )); let graphql_request = graphql::Request::fake_builder() .query(query) .and_operation_name(operation_name) @@ -2701,7 +3257,8 @@ mod tests { ) .unwrap(); - let router_instruments = config.new_router_instruments(); + let router_instruments = + config.new_router_instruments(Arc::new(config.new_builtin_router_instruments())); let router_req = RouterRequest::fake_builder() .header("conditional-custom", "X") .header("x-my-header-count", "55") @@ -2739,7 +3296,8 @@ mod tests { "acme.my_attribute" = "TEST" ); - let router_instruments = config.new_router_instruments(); + let router_instruments = + config.new_router_instruments(Arc::new(config.new_builtin_router_instruments())); let router_req = RouterRequest::fake_builder() .header("content-length", "35") .header("x-my-header-count", "5") @@ -2780,7 +3338,8 @@ mod tests { 
"acme.my_attribute" = "unknown" ); - let router_instruments = config.new_router_instruments(); + let router_instruments = + config.new_router_instruments(Arc::new(config.new_builtin_router_instruments())); let router_req = RouterRequest::fake_builder() .header("content-length", "35") .header("content-type", "application/graphql") @@ -2809,7 +3368,8 @@ mod tests { "http.response.status_code" = 400 ); - let router_instruments = config.new_router_instruments(); + let router_instruments = + config.new_router_instruments(Arc::new(config.new_builtin_router_instruments())); let router_req = RouterRequest::fake_builder() .header("content-length", "35") .header("content-type", "application/graphql") @@ -2947,7 +3507,10 @@ mod tests { ) .unwrap(); - let custom_instruments = SupergraphCustomInstruments::new(&config.supergraph.custom); + let custom_instruments = SupergraphCustomInstruments::new( + &config.supergraph.custom, + Arc::new(config.new_builtin_supergraph_instruments()), + ); let context = crate::context::Context::new(); let _ = context.insert(OPERATION_KIND, "query".to_string()).unwrap(); let context_with_error = crate::context::Context::new(); @@ -3012,7 +3575,10 @@ mod tests { ); assert_counter!("acme.request.on_graphql_data", 500.0, response.data = 500); - let custom_instruments = SupergraphCustomInstruments::new(&config.supergraph.custom); + let custom_instruments = SupergraphCustomInstruments::new( + &config.supergraph.custom, + Arc::new(config.new_builtin_supergraph_instruments()), + ); let supergraph_req = supergraph::Request::fake_builder() .header("content-length", "35") .header("x-my-header-count", "5") @@ -3066,7 +3632,10 @@ mod tests { ); assert_counter!("acme.request.on_graphql_data", 1000.0, response.data = 500); - let custom_instruments = SupergraphCustomInstruments::new(&config.supergraph.custom); + let custom_instruments = SupergraphCustomInstruments::new( + &config.supergraph.custom, + Arc::new(config.new_builtin_supergraph_instruments()), + ); let 
supergraph_req = supergraph::Request::fake_builder() .header("content-length", "35") .header("content-type", "application/graphql") diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index af5f78a0de..42b34cbfae 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -10,6 +10,8 @@ use ::tracing::info_span; use ::tracing::Span; use axum::headers::HeaderName; use config_new::cache::CacheInstruments; +use config_new::instruments::InstrumentsConfig; +use config_new::instruments::StaticInstrument; use config_new::Selectors; use dashmap::DashMap; use futures::future::ready; @@ -43,6 +45,7 @@ use opentelemetry::KeyValue; use opentelemetry_api::trace::TraceId; use opentelemetry_semantic_conventions::trace::HTTP_REQUEST_METHOD; use parking_lot::Mutex; +use parking_lot::RwLock; use rand::Rng; use router_bridge::planner::UsageReporting; use serde_json_bytes::json; @@ -198,7 +201,11 @@ pub(crate) struct Telemetry { apollo_metrics_sender: apollo_exporter::Sender, field_level_instrumentation_ratio: f64, sampling_filter_ratio: SamplerOption, - + pub(crate) graphql_custom_instruments: RwLock>>, + router_custom_instruments: RwLock>>, + supergraph_custom_instruments: RwLock>>, + subgraph_custom_instruments: RwLock>>, + cache_custom_instruments: RwLock>>, activation: Mutex, } @@ -252,6 +259,24 @@ impl Drop for Telemetry { } } +struct BuiltinInstruments { + graphql_custom_instruments: Arc>, + router_custom_instruments: Arc>, + supergraph_custom_instruments: Arc>, + subgraph_custom_instruments: Arc>, + cache_custom_instruments: Arc>, +} + +fn create_builtin_instruments(config: &InstrumentsConfig) -> BuiltinInstruments { + BuiltinInstruments { + graphql_custom_instruments: Arc::new(config.new_builtin_graphql_instruments()), + router_custom_instruments: Arc::new(config.new_builtin_router_instruments()), + supergraph_custom_instruments: 
Arc::new(config.new_builtin_supergraph_instruments()), + subgraph_custom_instruments: Arc::new(config.new_builtin_subgraph_instruments()), + cache_custom_instruments: Arc::new(config.new_builtin_cache_instruments()), + } +} + #[async_trait::async_trait] impl Plugin for Telemetry { type Config = config::Conf; @@ -275,6 +300,14 @@ impl Plugin for Telemetry { ::tracing::warn!("telemetry.instrumentation.spans.mode is currently set to 'deprecated', either explicitly or via defaulting. Set telemetry.instrumentation.spans.mode explicitly in your router.yaml to 'spec_compliant' for log and span attributes that follow OpenTelemetry semantic conventions. This option will be defaulted to 'spec_compliant' in a future release and eventually removed altogether"); } + let BuiltinInstruments { + graphql_custom_instruments, + router_custom_instruments, + supergraph_custom_instruments, + subgraph_custom_instruments, + cache_custom_instruments, + } = create_builtin_instruments(&config.instrumentation.instruments); + Ok(Telemetry { custom_endpoints: metrics_builder.custom_endpoints, apollo_metrics_sender: metrics_builder.apollo_metrics_sender, @@ -292,6 +325,11 @@ impl Plugin for Telemetry { .map(FilterMeterProvider::public), is_active: false, }), + graphql_custom_instruments: RwLock::new(graphql_custom_instruments), + router_custom_instruments: RwLock::new(router_custom_instruments), + supergraph_custom_instruments: RwLock::new(supergraph_custom_instruments), + subgraph_custom_instruments: RwLock::new(subgraph_custom_instruments), + cache_custom_instruments: RwLock::new(cache_custom_instruments), sampling_filter_ratio, config: Arc::new(config), }) @@ -306,6 +344,7 @@ impl Plugin for Telemetry { matches!(config.instrumentation.spans.mode, SpanMode::Deprecated); let field_level_instrumentation_ratio = self.field_level_instrumentation_ratio; let metrics_sender = self.apollo_metrics_sender.clone(); + let static_router_instruments = self.router_custom_instruments.read().clone(); 
ServiceBuilder::new() .map_response(move |response: router::Response| { @@ -400,7 +439,7 @@ impl Plugin for Telemetry { let custom_instruments: RouterInstruments = config_request .instrumentation .instruments - .new_router_instruments(); + .new_router_instruments(static_router_instruments.clone()); custom_instruments.on_request(request); let custom_events: RouterEvents = @@ -527,6 +566,8 @@ impl Plugin for Telemetry { let config_map_res_first = config.clone(); let config_map_res = config.clone(); let field_level_instrumentation_ratio = self.field_level_instrumentation_ratio; + let static_supergraph_instruments = self.supergraph_custom_instruments.read().clone(); + let static_graphql_instruments = self.graphql_custom_instruments.read().clone(); ServiceBuilder::new() .instrument(move |supergraph_req: &SupergraphRequest| span_mode.create_supergraph( &config_instrument.apollo, @@ -591,11 +632,11 @@ impl Plugin for Telemetry { let custom_instruments = config .instrumentation .instruments - .new_supergraph_instruments(); + .new_supergraph_instruments(static_supergraph_instruments.clone()); custom_instruments.on_request(req); - let custom_graphql_instruments:GraphQLInstruments = (&config + let custom_graphql_instruments: GraphQLInstruments = config .instrumentation - .instruments).into(); + .instruments.new_graphql_instruments(static_graphql_instruments.clone()); custom_graphql_instruments.on_request(req); let supergraph_events = config.instrumentation.events.new_supergraph_events(); @@ -690,6 +731,8 @@ impl Plugin for Telemetry { let subgraph_metrics_conf_resp = subgraph_metrics_conf_req.clone(); let subgraph_name = ByteString::from(name); let name = name.to_owned(); + let static_subgraph_instruments = self.subgraph_custom_instruments.read().clone(); + let static_cache_instruments = self.cache_custom_instruments.read().clone(); ServiceBuilder::new() .instrument(move |req: &SubgraphRequest| span_mode.create_subgraph(name.as_str(), req)) .map_request(move |req: 
SubgraphRequest| request_ftv1(req)) @@ -710,13 +753,15 @@ impl Plugin for Telemetry { let custom_instruments = config .instrumentation .instruments - .new_subgraph_instruments(); + .new_subgraph_instruments(static_subgraph_instruments.clone()); custom_instruments.on_request(sub_request); let custom_events = config.instrumentation.events.new_subgraph_events(); custom_events.on_request(sub_request); - let custom_cache_instruments: CacheInstruments = - (&config.instrumentation.instruments).into(); + let custom_cache_instruments: CacheInstruments = config + .instrumentation + .instruments + .new_cache_instruments(static_cache_instruments.clone()); custom_cache_instruments.on_request(sub_request); ( @@ -843,6 +888,20 @@ impl Telemetry { activation.reload_metrics(); + let BuiltinInstruments { + graphql_custom_instruments, + router_custom_instruments, + supergraph_custom_instruments, + subgraph_custom_instruments, + cache_custom_instruments, + } = create_builtin_instruments(&self.config.instrumentation.instruments); + + *self.graphql_custom_instruments.write() = graphql_custom_instruments; + *self.router_custom_instruments.write() = router_custom_instruments; + *self.supergraph_custom_instruments.write() = supergraph_custom_instruments; + *self.subgraph_custom_instruments.write() = subgraph_custom_instruments; + *self.cache_custom_instruments.write() = cache_custom_instruments; + reload_fmt(create_fmt_layer(&self.config)); activation.is_active = true; } From 552e4c3650ba03df1105c285bbd7e504dc905ed0 Mon Sep 17 00:00:00 2001 From: Tyler Bloom Date: Thu, 25 Jul 2024 12:03:17 -0400 Subject: [PATCH 007/108] Fixed serialization panic in snapshot macro (#5724) --- Cargo.lock | 16 ++++++++++++++++ apollo-federation/Cargo.toml | 3 ++- apollo-federation/src/operation/mod.rs | 8 +++++++- apollo-federation/src/query_graph/graph_path.rs | 3 +++ apollo-federation/src/utils/logging.rs | 4 ++-- 5 files changed, 30 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
f6964d44da..2a345f0f1e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,6 +436,7 @@ dependencies = [ "multimap 0.10.0", "nom", "petgraph", + "ron", "serde", "serde_json", "serde_json_bytes", @@ -1615,6 +1616,9 @@ name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +dependencies = [ + "serde", +] [[package]] name = "block-buffer" @@ -6093,6 +6097,18 @@ dependencies = [ "paste", ] +[[package]] +name = "ron" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "serde", + "serde_derive", +] + [[package]] name = "router-bridge" version = "0.5.27+v2.8.1" diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml index 4a5aa3a1aa..42bd2b74c6 100644 --- a/apollo-federation/Cargo.toml +++ b/apollo-federation/Cargo.toml @@ -14,7 +14,7 @@ autotests = false # Integration tests are m # logging statements that capture serialized versions of key data structures. # This logging is gated behind a feature to avoid any unnecessary (even if # small) runtime costs where this data will not be desired. 
-snapshot_tracing = [] +snapshot_tracing = ["ron"] [dependencies] apollo-compiler.workspace = true @@ -36,6 +36,7 @@ strum_macros = "0.26.0" thiserror = "1.0" url = "2" tracing = "0.1.40" +ron = { version = "0.8.1", optional = true } [dev-dependencies] hex.workspace = true diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index 468d1e88d4..b04cdefeb3 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -67,7 +67,10 @@ static NEXT_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(1); /// /// Note that we shouldn't add `derive(Serialize, Deserialize)` to this without changing the types /// to be something like UUIDs. -#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +// NOTE(@TylerBloom): This feature gate can be removed once the condition in the comment above is +// met. Note that there are `serde(skip)` statements that should be removed once this is removed. +#[cfg_attr(feature = "snapshot_tracing", derive(Serialize))] pub(crate) struct SelectionId(usize); impl SelectionId { @@ -631,6 +634,7 @@ pub(crate) enum SelectionKey { }, Defer { /// Unique selection ID used to distinguish deferred fragment spreads that cannot be merged. + #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] deferred_id: SelectionId, }, } @@ -1467,6 +1471,7 @@ mod fragment_spread_selection { // on different locations. While we now keep track of those references, they are currently ignored. 
#[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) fragment_directives: Arc, + #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] pub(crate) selection_id: SelectionId, } @@ -1759,6 +1764,7 @@ mod inline_fragment_selection { pub(crate) type_condition_position: Option, #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) directives: Arc, + #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] pub(crate) selection_id: SelectionId, } diff --git a/apollo-federation/src/query_graph/graph_path.rs b/apollo-federation/src/query_graph/graph_path.rs index b65736311d..ea45c00d1d 100644 --- a/apollo-federation/src/query_graph/graph_path.rs +++ b/apollo-federation/src/query_graph/graph_path.rs @@ -227,6 +227,9 @@ pub(crate) struct SubgraphEnteringEdgeInfo { /// Note that we shouldn't add `derive(Serialize, Deserialize)` to this without changing the types /// to be something like UUIDs. #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +// NOTE(@TylerBloom): This feature gate can be removed once the condition in the comment above is +// met. +#[cfg_attr(feature = "snapshot_tracing", derive(serde::Serialize))] pub(crate) struct OverrideId(usize); /// Global storage for the counter used to allocate `OverrideId`s. diff --git a/apollo-federation/src/utils/logging.rs b/apollo-federation/src/utils/logging.rs index fd7bb4b3d2..c7a07c2ef2 100644 --- a/apollo-federation/src/utils/logging.rs +++ b/apollo-federation/src/utils/logging.rs @@ -25,7 +25,7 @@ macro_rules! snapshot { #[cfg(feature = "snapshot_tracing")] tracing::trace!( snapshot = std::any::type_name_of_val(&$value), - data = serde_json::to_string(&$value).expect(concat!( + data = ron::ser::to_string(&$value).expect(concat!( "Could not serialize value for a snapshot with message: ", $msg )), @@ -36,7 +36,7 @@ macro_rules! 
snapshot { #[cfg(feature = "snapshot_tracing")] tracing::trace!( snapshot = std::any::type_name_of_val(&$value), - data = serde_json::to_string(&$value).expect(concat!( + data = ron::ser::to_string(&$value).expect(concat!( "Could not serialize value for a snapshot with message: ", $msg )), From aafac71e9a7b5eb663858ca23ebe4ef74ab7dd84 Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Thu, 25 Jul 2024 09:15:17 -0700 Subject: [PATCH 008/108] Implemented semantic comparison of `requires` and `*_rewrites` fields of `FetchNode` (#5708) - updated dual_query_planner to compare those fields semantically (ignoring their ordering). --- .../src/query_planner/dual_query_planner.rs | 136 +++++++++++++----- apollo-router/src/query_planner/selection.rs | 18 +++ 2 files changed, 115 insertions(+), 39 deletions(-) diff --git a/apollo-router/src/query_planner/dual_query_planner.rs b/apollo-router/src/query_planner/dual_query_planner.rs index 2360b17703..99d825ac12 100644 --- a/apollo-router/src/query_planner/dual_query_planner.rs +++ b/apollo-router/src/query_planner/dual_query_planner.rs @@ -25,6 +25,8 @@ use crate::query_planner::bridge_query_planner::metric_query_planning_plan_durat use crate::query_planner::bridge_query_planner::RUST_QP_MODE; use crate::query_planner::convert::convert_root_query_plan_node; use crate::query_planner::render_diff; +use crate::query_planner::rewrites::DataRewrite; +use crate::query_planner::selection::Selection; use crate::query_planner::DeferredNode; use crate::query_planner::PlanNode; use crate::query_planner::Primary; @@ -184,13 +186,13 @@ fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> bool { authorization, } = this; *service_name == other.service_name - && *requires == other.requires + && same_selection_set_sorted(requires, &other.requires) && vec_matches_sorted(variable_usages, &other.variable_usages) && *operation_kind == other.operation_kind && *id == other.id - && *input_rewrites == other.input_rewrites - && *output_rewrites == 
other.output_rewrites - && *context_rewrites == other.context_rewrites + && same_rewrites(input_rewrites, &other.input_rewrites) + && same_rewrites(output_rewrites, &other.output_rewrites) + && same_rewrites(context_rewrites, &other.context_rewrites) && *authorization == other.authorization && operation_matches(operation, &other.operation) } @@ -368,6 +370,88 @@ fn flatten_node_matches(this: &FlattenNode, other: &FlattenNode) -> bool { *path == other.path && plan_node_matches(node, &other.node) } +// Copied and modified from `apollo_federation::operation::SelectionKey` +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +enum SelectionKey { + Field { + /// The field alias (if specified) or field name in the resulting selection set. + response_name: Name, + directives: ast::DirectiveList, + }, + FragmentSpread { + /// The name of the fragment. + fragment_name: Name, + directives: ast::DirectiveList, + }, + InlineFragment { + /// The optional type condition of the fragment. + type_condition: Option, + directives: ast::DirectiveList, + }, +} + +fn get_selection_key(selection: &Selection) -> SelectionKey { + match selection { + Selection::Field(field) => SelectionKey::Field { + response_name: field.response_name().clone(), + directives: Default::default(), + }, + Selection::InlineFragment(fragment) => SelectionKey::InlineFragment { + type_condition: fragment.type_condition.clone(), + directives: Default::default(), + }, + } +} + +fn hash_value(x: &T) -> u64 { + let mut hasher = DefaultHasher::new(); + x.hash(&mut hasher); + hasher.finish() +} + +fn hash_selection_key(selection: &Selection) -> u64 { + hash_value(&get_selection_key(selection)) +} + +fn same_selection(x: &Selection, y: &Selection) -> bool { + let x_key = get_selection_key(x); + let y_key = get_selection_key(y); + if x_key != y_key { + return false; + } + let x_selections = x.selection_set(); + let y_selections = y.selection_set(); + match (x_selections, y_selections) { + (Some(x), Some(y)) => 
same_selection_set_sorted(x, y), + (None, None) => true, + _ => false, + } +} + +fn same_selection_set_sorted(x: &[Selection], y: &[Selection]) -> bool { + fn sorted_by_selection_key(s: &[Selection]) -> Vec<&Selection> { + let mut sorted: Vec<&Selection> = s.iter().collect(); + sorted.sort_by_key(|x| hash_selection_key(x)); + sorted + } + + if x.len() != y.len() { + return false; + } + sorted_by_selection_key(x) + .into_iter() + .zip(sorted_by_selection_key(y)) + .all(|(x, y)| same_selection(x, y)) +} + +fn same_rewrites(x: &Option>, y: &Option>) -> bool { + match (x, y) { + (None, None) => true, + (Some(x), Some(y)) => vec_matches_as_set(x, y, |a, b| a == b), + _ => false, + } +} + //================================================================================================== // AST comparison functions @@ -433,27 +517,7 @@ fn same_ast_fragment_definition(x: &ast::FragmentDefinition, y: &ast::FragmentDe && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set) } -// Copied and modified from `apollo_federation::operation::SelectionKey` -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub(crate) enum SelectionKey { - Field { - /// The field alias (if specified) or field name in the resulting selection set. - response_name: Name, - directives: ast::DirectiveList, - }, - FragmentSpread { - /// The name of the fragment. - fragment_name: Name, - directives: ast::DirectiveList, - }, - InlineFragment { - /// The optional type condition of the fragment. - type_condition: Option, - directives: ast::DirectiveList, - }, -} - -fn get_selection_key(selection: &ast::Selection) -> SelectionKey { +fn get_ast_selection_key(selection: &ast::Selection) -> SelectionKey { match selection { ast::Selection::Field(field) => SelectionKey::Field { response_name: field.response_name().clone(), @@ -473,7 +537,7 @@ fn get_selection_key(selection: &ast::Selection) -> SelectionKey { use std::ops::Not; /// Get the sub-selections of a selection. 
-fn get_selection_set(selection: &ast::Selection) -> Option<&Vec> { +fn get_ast_selection_set(selection: &ast::Selection) -> Option<&Vec> { match selection { ast::Selection::Field(field) => field .selection_set @@ -486,13 +550,13 @@ fn get_selection_set(selection: &ast::Selection) -> Option<&Vec> } fn same_ast_selection(x: &ast::Selection, y: &ast::Selection) -> bool { - let x_key = get_selection_key(x); - let y_key = get_selection_key(y); + let x_key = get_ast_selection_key(x); + let y_key = get_ast_selection_key(y); if x_key != y_key { return false; } - let x_selections = get_selection_set(x); - let y_selections = get_selection_set(y); + let x_selections = get_ast_selection_set(x); + let y_selections = get_ast_selection_set(y); match (x_selections, y_selections) { (Some(x), Some(y)) => same_ast_selection_set_sorted(x, y), (None, None) => true, @@ -500,20 +564,14 @@ fn same_ast_selection(x: &ast::Selection, y: &ast::Selection) -> bool { } } -fn hash_value(x: &T) -> u64 { - let mut hasher = DefaultHasher::new(); - x.hash(&mut hasher); - hasher.finish() -} - -fn hash_selection_key(selection: &ast::Selection) -> u64 { - hash_value(&get_selection_key(selection)) +fn hash_ast_selection_key(selection: &ast::Selection) -> u64 { + hash_value(&get_ast_selection_key(selection)) } fn same_ast_selection_set_sorted(x: &[ast::Selection], y: &[ast::Selection]) -> bool { fn sorted_by_selection_key(s: &[ast::Selection]) -> Vec<&ast::Selection> { let mut sorted: Vec<&ast::Selection> = s.iter().collect(); - sorted.sort_by_key(|x| hash_selection_key(x)); + sorted.sort_by_key(|x| hash_ast_selection_key(x)); sorted } diff --git a/apollo-router/src/query_planner/selection.rs b/apollo-router/src/query_planner/selection.rs index b6dd46ffa4..e2c5e9b013 100644 --- a/apollo-router/src/query_planner/selection.rs +++ b/apollo-router/src/query_planner/selection.rs @@ -23,6 +23,17 @@ pub(crate) enum Selection { InlineFragment(InlineFragment), } +impl Selection { + pub(crate) fn 
selection_set(&self) -> Option<&[Selection]> { + match self { + Selection::Field(Field { selections, .. }) => selections.as_deref(), + Selection::InlineFragment(InlineFragment { selections, .. }) => { + Some(selections.as_slice()) + } + } + } +} + /// The field that is used #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] @@ -39,6 +50,13 @@ pub(crate) struct Field { pub(crate) selections: Option>, } +impl Field { + // Mirroring `apollo_compiler::Field::response_name` + pub(crate) fn response_name(&self) -> &Name { + self.alias.as_ref().unwrap_or(&self.name) + } +} + /// An inline fragment. #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] From 2fecee8e7a94f7ff19855e62562f3222fe7ba229 Mon Sep 17 00:00:00 2001 From: Iryna Shestak Date: Thu, 25 Jul 2024 22:53:41 +0200 Subject: [PATCH 009/108] chore(qp): add a diff_plan function to display differences between js & rust qps (#5727) Co-authored-by: Duckki Oe --- apollo-router/src/lib.rs | 1 + .../src/query_planner/dual_query_planner.rs | 35 +++++++++++++++---- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs index 656f33c976..67f6f70daf 100644 --- a/apollo-router/src/lib.rs +++ b/apollo-router/src/lib.rs @@ -115,6 +115,7 @@ pub mod _private { pub use crate::plugin::PLUGINS; // For comparison/fuzzing pub use crate::query_planner::bridge_query_planner::QueryPlanResult; + pub use crate::query_planner::dual_query_planner::diff_plan; pub use crate::query_planner::dual_query_planner::plan_matches; // For tests pub use crate::router_factory::create_test_service_factory_from_yaml; diff --git a/apollo-router/src/query_planner/dual_query_planner.rs b/apollo-router/src/query_planner/dual_query_planner.rs index 99d825ac12..35a7fe8ce0 100644 --- a/apollo-router/src/query_planner/dual_query_planner.rs +++ b/apollo-router/src/query_planner/dual_query_planner.rs @@ -145,12 +145,10 @@ 
impl BothModeComparisonJob { tracing::debug!("JS and Rust query plans match{operation_desc}! 🎉"); } else { tracing::debug!("JS v.s. Rust query plan mismatch{operation_desc}"); - if let Some(formatted) = &js_plan.formatted_query_plan { - tracing::debug!( - "Diff of formatted plans:\n{}", - render_diff(&diff::lines(formatted, &rust_plan.to_string())) - ); - } + tracing::debug!( + "Diff of formatted plans:\n{}", + diff_plan(js_plan, rust_plan) + ); tracing::trace!("JS query plan Debug: {js_root_node:#?}"); tracing::trace!("Rust query plan Debug: {rust_root_node:#?}"); } @@ -244,6 +242,31 @@ pub fn plan_matches(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> bool { opt_plan_node_matches(js_root_node, &rust_root_node) } +pub fn diff_plan(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> String { + let js_root_node = &js_plan.query_plan.node; + let rust_root_node = convert_root_query_plan_node(rust_plan); + + match (js_root_node, rust_root_node) { + (None, None) => String::from(""), + (None, Some(rust)) => { + let rust = &format!("{rust:#?}"); + let differences = diff::lines("", rust); + render_diff(&differences) + } + (Some(js), None) => { + let js = &format!("{js:#?}"); + let differences = diff::lines(js, ""); + render_diff(&differences) + } + (Some(js), Some(rust)) => { + let rust = &format!("{rust:#?}"); + let js = &format!("{js:#?}"); + let differences = diff::lines(js, rust); + render_diff(&differences) + } + } +} + fn opt_plan_node_matches( this: &Option>, other: &Option>, From e7cc5e9a814dbaaa43cc0499a2880a6cf8ad72b0 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Fri, 26 Jul 2024 11:25:43 +0200 Subject: [PATCH 010/108] add a bench command to generate reports against a schema and an operations folder (#5726) --- Cargo.lock | 3 + apollo-federation/cli/Cargo.toml | 7 + .../cli/fixtures/queries/topproducts.graphql | 11 ++ .../cli/fixtures/queries/topproducts2.graphql | 6 + .../cli/fixtures/starstuff.graphql | 98 ++++++++++++++ 
apollo-federation/cli/src/bench.rs | 126 ++++++++++++++++++ apollo-federation/cli/src/main.rs | 37 +++++ .../apollo_federation_cli__bench.snap | 20 +++ 8 files changed, 308 insertions(+) create mode 100644 apollo-federation/cli/fixtures/queries/topproducts.graphql create mode 100644 apollo-federation/cli/fixtures/queries/topproducts2.graphql create mode 100644 apollo-federation/cli/fixtures/starstuff.graphql create mode 100644 apollo-federation/cli/src/bench.rs create mode 100644 apollo-federation/cli/src/snapshots/apollo_federation_cli__bench.snap diff --git a/Cargo.lock b/Cargo.lock index 2a345f0f1e..865b8fe132 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -457,6 +457,9 @@ dependencies = [ "apollo-compiler", "apollo-federation", "clap", + "insta", + "serde", + "serde_json", ] [[package]] diff --git a/apollo-federation/cli/Cargo.toml b/apollo-federation/cli/Cargo.toml index b64cc7a03c..6512e73ac8 100644 --- a/apollo-federation/cli/Cargo.toml +++ b/apollo-federation/cli/Cargo.toml @@ -7,3 +7,10 @@ edition = "2021" apollo-compiler.workspace = true apollo-federation = { path = ".." 
} clap = { version = "4.5.1", features = ["derive"] } + +[dev-dependencies] +insta = { version = "1.38.0", features = ["json", "redactions"] } +serde = { version = "1.0.197", features = ["derive"] } +serde_json = { version = "1.0.114", features = [ + "preserve_order", +] } \ No newline at end of file diff --git a/apollo-federation/cli/fixtures/queries/topproducts.graphql b/apollo-federation/cli/fixtures/queries/topproducts.graphql new file mode 100644 index 0000000000..6d6676a990 --- /dev/null +++ b/apollo-federation/cli/fixtures/queries/topproducts.graphql @@ -0,0 +1,11 @@ +query TopProducts($first: Int) { + topProducts(first: $first) { + upc + name + reviews { + id + product { name } + author { id name } + } + } +} \ No newline at end of file diff --git a/apollo-federation/cli/fixtures/queries/topproducts2.graphql b/apollo-federation/cli/fixtures/queries/topproducts2.graphql new file mode 100644 index 0000000000..92df02561d --- /dev/null +++ b/apollo-federation/cli/fixtures/queries/topproducts2.graphql @@ -0,0 +1,6 @@ +query TopProduct2($first: Int) { + topProducts(first: $first) { + upc + name + } +} \ No newline at end of file diff --git a/apollo-federation/cli/fixtures/starstuff.graphql b/apollo-federation/cli/fixtures/starstuff.graphql new file mode 100644 index 0000000000..504fbbaafb --- /dev/null +++ b/apollo-federation/cli/fixtures/starstuff.graphql @@ -0,0 +1,98 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query + mutation: Mutation +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) 
on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev/") + INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev/") + PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev/") + REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev/") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Mutation + @join__type(graph: PRODUCTS) + @join__type(graph: REVIEWS) +{ + createProduct(upc: ID!, name: String): Product @join__field(graph: PRODUCTS) + createReview(upc: ID!, id: ID!, body: String): Review @join__field(graph: REVIEWS) +} + +type Product + @join__type(graph: ACCOUNTS, key: "upc", extension: true) + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: REVIEWS, key: "upc") +{ + upc: String! 
+ weight: Int @join__field(graph: INVENTORY, external: true) @join__field(graph: PRODUCTS) + price: Int @join__field(graph: INVENTORY, external: true) @join__field(graph: PRODUCTS) + inStock: Boolean @join__field(graph: INVENTORY) + shippingEstimate: Int @join__field(graph: INVENTORY, requires: "price weight") + name: String @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) +} + +type Query + @join__type(graph: ACCOUNTS) + @join__type(graph: INVENTORY) + @join__type(graph: PRODUCTS) + @join__type(graph: REVIEWS) +{ + me: User @join__field(graph: ACCOUNTS) + recommendedProducts: [Product] @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__type(graph: REVIEWS, key: "id") +{ + id: ID! + body: String + author: User @join__field(graph: REVIEWS, provides: "username") + product: Product +} + +type User + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") +{ + id: ID! 
+ name: String @join__field(graph: ACCOUNTS) + username: String @join__field(graph: ACCOUNTS) @join__field(graph: REVIEWS, external: true) + reviews: [Review] @join__field(graph: REVIEWS) +} diff --git a/apollo-federation/cli/src/bench.rs b/apollo-federation/cli/src/bench.rs new file mode 100644 index 0000000000..cd098a1989 --- /dev/null +++ b/apollo-federation/cli/src/bench.rs @@ -0,0 +1,126 @@ +use std::fmt::Display; +use std::path::PathBuf; +use std::time::Instant; + +use apollo_compiler::ExecutableDocument; +use apollo_federation::error::FederationError; +use apollo_federation::query_plan::query_planner::QueryPlanner; +use apollo_federation::query_plan::query_planner::QueryPlannerConfig; +use apollo_federation::Supergraph; + +pub(crate) fn run_bench( + supergraph: Supergraph, + queries_dir: &PathBuf, +) -> Result, FederationError> { + let planner = QueryPlanner::new( + &supergraph, + QueryPlannerConfig { + reuse_query_fragments: false, + subgraph_graphql_validation: false, + generate_query_fragments: true, + ..Default::default() + }, + ) + .expect("Invalid planner"); + + let mut entries = std::fs::read_dir(queries_dir) + .unwrap() + .map(|res| res.map(|e| e.path())) + .collect::, std::io::Error>>() + .unwrap(); + + entries.sort(); + + let mut results = Vec::with_capacity(entries.len()); + + for query_path in entries.into_iter() { + let query_string = std::fs::read_to_string(query_path.clone()).unwrap(); + + let file_name = query_path + .file_name() + .to_owned() + .unwrap() + .to_string_lossy() + .to_string(); + + let document = match ExecutableDocument::parse_and_validate( + supergraph.schema.schema(), + query_string, + "query", + ) { + Ok(document) => document, + Err(_) => { + results.push(BenchOutput { + query_name: file_name.split('-').next().unwrap().to_string(), + file_name, + timing: 0.0, + eval_plans: None, + error: Some("error".to_string()), + }); + + continue; + } + }; + let now = Instant::now(); + let plan = planner.build_query_plan(&document, None); 
+ let elapsed = now.elapsed().as_secs_f64() * 1000.0; + let mut eval_plans = None; + let mut error = None; + if let Ok(p) = plan { + eval_plans = Some(p.statistics.evaluated_plan_count.into_inner().to_string()); + } else { + error = Some("error".to_string()); + }; + + results.push(BenchOutput { + query_name: file_name.split('-').next().unwrap().to_string(), + file_name, + timing: elapsed, + eval_plans, + error, + }); + } + + // totally arbitrary + results.sort_by(|a, b| a.partial_cmp(b).unwrap_or(a.query_name.cmp(&b.query_name))); + Ok(results) +} + +#[derive(Debug)] +#[cfg_attr(test, derive(serde::Serialize))] +pub(crate) struct BenchOutput { + file_name: String, + query_name: String, + timing: f64, + eval_plans: Option, + error: Option, +} + +impl PartialEq for BenchOutput { + fn eq(&self, other: &Self) -> bool { + self.timing == other.timing + } +} + +impl PartialOrd for BenchOutput { + fn partial_cmp(&self, other: &Self) -> Option { + match other.timing.partial_cmp(&self.timing) { + Some(core::cmp::Ordering::Equal) => Some(core::cmp::Ordering::Equal), + ord => ord, + } + } +} + +impl Display for BenchOutput { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "| [{}](queries/{}) | {} | {} | {} |", + self.query_name, + self.file_name, + self.timing, + self.eval_plans.clone().unwrap_or(" ".to_string()), + self.error.clone().unwrap_or(" ".to_string()) + ) + } +} diff --git a/apollo-federation/cli/src/main.rs b/apollo-federation/cli/src/main.rs index 415b7efb7b..41ba7e5f08 100644 --- a/apollo-federation/cli/src/main.rs +++ b/apollo-federation/cli/src/main.rs @@ -11,8 +11,12 @@ use apollo_federation::query_graph; use apollo_federation::query_plan::query_planner::QueryPlanner; use apollo_federation::query_plan::query_planner::QueryPlannerConfig; use apollo_federation::subgraph; +use bench::BenchOutput; use clap::Parser; +mod bench; +use bench::run_bench; + /// CLI arguments. 
See #[derive(Parser)] struct Args { @@ -60,6 +64,12 @@ enum Command { /// The output directory for the extracted subgraph schemas destination_dir: Option, }, + Bench { + /// The path to the supergraph schema file + supergraph_schema: PathBuf, + /// The path to the directory that contains all operations to run against + operations_dir: PathBuf, + }, } fn main() -> ExitCode { @@ -75,6 +85,10 @@ fn main() -> ExitCode { supergraph_schema, destination_dir, } => cmd_extract(&supergraph_schema, destination_dir.as_ref()), + Command::Bench { + supergraph_schema, + operations_dir, + } => cmd_bench(&supergraph_schema, &operations_dir), }; match result { Err(error) => { @@ -210,3 +224,26 @@ fn cmd_extract(file_path: &Path, dest: Option<&PathBuf>) -> Result<(), Federatio } Ok(()) } + +fn _cmd_bench( + file_path: &Path, + operations_dir: &PathBuf, +) -> Result, FederationError> { + let supergraph = load_supergraph_file(file_path)?; + run_bench(supergraph, operations_dir) +} + +fn cmd_bench(file_path: &Path, operations_dir: &PathBuf) -> Result<(), FederationError> { + let results = _cmd_bench(file_path, operations_dir)?; + println!("| operation_name | time (ms) | evaluated_plans (max 10000) | error |"); + println!("|----------------|----------------|-----------|-----------------------------|"); + for r in results { + println!("{}", r); + } + Ok(()) +} + +#[test] +fn test_bench() { + insta::assert_json_snapshot!(_cmd_bench(Path::new("./fixtures/starstuff.graphql"), &PathBuf::from("./fixtures/queries")).unwrap(), { "[].timing" => 1.234 }); +} diff --git a/apollo-federation/cli/src/snapshots/apollo_federation_cli__bench.snap b/apollo-federation/cli/src/snapshots/apollo_federation_cli__bench.snap new file mode 100644 index 0000000000..40588f3640 --- /dev/null +++ b/apollo-federation/cli/src/snapshots/apollo_federation_cli__bench.snap @@ -0,0 +1,20 @@ +--- +source: apollo-federation/cli/src/main.rs +expression: "_cmd_bench(&Path::new(\"./test/starstuff.graphql\"),\n 
&PathBuf::from(\"./test/queries\")).unwrap()" +--- +[ + { + "file_name": "topproducts.graphql", + "query_name": "topproducts.graphql", + "timing": 1.234, + "eval_plans": "1", + "error": null + }, + { + "file_name": "topproducts2.graphql", + "query_name": "topproducts2.graphql", + "timing": 1.234, + "eval_plans": "1", + "error": null + } +] From 32bd66d9cc437fc81113905d33935c83cfd2f2df Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Fri, 26 Jul 2024 02:40:11 -0700 Subject: [PATCH 011/108] export render_diff via the `_private` module (#5729) --- apollo-router/src/lib.rs | 1 + .../src/query_planner/bridge_query_planner.rs | 11 +++++++++-- apollo-router/src/query_planner/dual_query_planner.rs | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs index 67f6f70daf..9ac39c9e23 100644 --- a/apollo-router/src/lib.rs +++ b/apollo-router/src/lib.rs @@ -114,6 +114,7 @@ pub mod _private { pub use crate::plugin::PluginFactory; pub use crate::plugin::PLUGINS; // For comparison/fuzzing + pub use crate::query_planner::bridge_query_planner::render_diff; pub use crate::query_planner::bridge_query_planner::QueryPlanResult; pub use crate::query_planner::dual_query_planner::diff_plan; pub use crate::query_planner::dual_query_planner::plan_matches; diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index cdd1d710ef..eb5990e43e 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -878,7 +878,7 @@ impl BridgeQueryPlanner { } /// Data coming from the `plan` method on the router_bridge -// Note: Reexported under `apollo_compiler::_private` +// Note: Reexported under `apollo_router::_private` #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct QueryPlanResult { @@ -886,6 +886,12 @@ pub struct QueryPlanResult { pub(super) 
query_plan: QueryPlan, } +impl QueryPlanResult { + pub fn formatted_query_plan(&self) -> Option<&str> { + self.formatted_query_plan.as_deref().map(String::as_str) + } +} + #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] /// The root query plan container. @@ -894,7 +900,8 @@ pub(super) struct QueryPlan { pub(super) node: Option>, } -pub(crate) fn render_diff(differences: &[diff::Result<&str>]) -> String { +// Note: Reexported under `apollo_router::_private` +pub fn render_diff(differences: &[diff::Result<&str>]) -> String { let mut output = String::new(); for diff_line in differences { match diff_line { diff --git a/apollo-router/src/query_planner/dual_query_planner.rs b/apollo-router/src/query_planner/dual_query_planner.rs index 35a7fe8ce0..0ef5bde512 100644 --- a/apollo-router/src/query_planner/dual_query_planner.rs +++ b/apollo-router/src/query_planner/dual_query_planner.rs @@ -235,7 +235,7 @@ fn operation_matches(this: &SubgraphOperation, other: &SubgraphOperation) -> boo // The rest is calling the comparison functions above instead of `PartialEq`, // but otherwise behave just like `PartialEq`: -// Note: Reexported under `apollo_compiler::_private` +// Note: Reexported under `apollo_router::_private` pub fn plan_matches(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> bool { let js_root_node = &js_plan.query_plan.node; let rust_root_node = convert_root_query_plan_node(rust_plan); From 51a44949ea12131fb69a8c0bf9f7ea83bf7136c9 Mon Sep 17 00:00:00 2001 From: Lenny Burdette Date: Fri, 26 Jul 2024 08:47:46 -0400 Subject: [PATCH 012/108] join__directive support in extract_subgraphs_from_supergraph (#5720) --- apollo-federation/src/link/mod.rs | 20 ++ .../extract_subgraphs_from_supergraph.rs | 258 +++++++++++++++++- apollo-federation/src/subgraph/mod.rs | 11 + apollo-federation/src/subgraph/spec.rs | 2 + 4 files changed, 290 insertions(+), 1 deletion(-) diff --git a/apollo-federation/src/link/mod.rs 
b/apollo-federation/src/link/mod.rs index 272c5f4adc..326eee308f 100644 --- a/apollo-federation/src/link/mod.rs +++ b/apollo-federation/src/link/mod.rs @@ -6,9 +6,11 @@ use std::sync::Arc; use apollo_compiler::ast::Directive; use apollo_compiler::ast::Value; use apollo_compiler::name; +use apollo_compiler::schema::Component; use apollo_compiler::InvalidNameError; use apollo_compiler::Name; use apollo_compiler::Node; +use apollo_compiler::Schema; use thiserror::Error; use crate::error::FederationError; @@ -329,6 +331,24 @@ impl Link { purpose, }) } + + pub fn for_identity<'schema>( + schema: &'schema Schema, + identity: &Identity, + ) -> Option<(Self, &'schema Component)> { + schema + .schema_definition + .directives + .iter() + .find_map(|directive| { + let link = Link::from_directive_application(directive).ok()?; + if link.url.identity == *identity { + Some((link, directive)) + } else { + None + } + }) + } } impl fmt::Display for Link { diff --git a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs index ef354eaef1..ebbceade17 100644 --- a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs +++ b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs @@ -4,6 +4,8 @@ use std::fmt::Write; use std::ops::Deref; use std::sync::Arc; +use apollo_compiler::ast::Argument; +use apollo_compiler::ast::Directive; use apollo_compiler::ast::FieldDefinition; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; @@ -31,6 +33,7 @@ use apollo_compiler::schema::UnionType; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Node; +use itertools::Itertools; use lazy_static::lazy_static; use time::OffsetDateTime; @@ -46,6 +49,7 @@ use crate::link::join_spec_definition::TypeDirectiveArguments; use crate::link::spec::Identity; use crate::link::spec::Version; use 
crate::link::spec_definition::SpecDefinition; +use crate::link::DEFAULT_LINK_NAME; use crate::schema::field_set::parse_field_set_without_normalization; use crate::schema::position::is_graphql_reserved_name; use crate::schema::position::CompositeTypeDefinitionPosition; @@ -358,6 +362,12 @@ fn extract_subgraphs_from_fed_2_supergraph( &input_object_types, )?; + extract_join_directives( + supergraph_schema, + subgraphs, + graph_enum_value_name_to_subgraph_name, + )?; + // We add all the "executable" directive definitions from the supergraph to each subgraphs, as // those may be part of a query and end up in any subgraph fetches. We do this "last" to make // sure that if one of the directives uses a type for an argument, that argument exists. Note @@ -1491,7 +1501,7 @@ impl IntoIterator for FederationSubgraphs { // TODO(@goto-bus-stop): consider an appropriate name for this in the public API // TODO(@goto-bus-stop): should this exist separately from the `crate::subgraph::Subgraph` type? -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ValidFederationSubgraph { pub name: String, pub url: String, @@ -2094,10 +2104,181 @@ fn maybe_dump_subgraph_schema(subgraph: FederationSubgraph, message: &mut String }; } +//////////////////////////////////////////////////////////////////////////////// +/// @join__directive extraction + +static JOIN_DIRECTIVE: &str = "join__directive"; + +/// Converts `@join__directive(graphs: [A], name: "foo")` to `@foo` in the A subgraph. +/// If the directive is a link directive on the schema definition, we also need +/// to update the metadata and add the imported definitions. 
+fn extract_join_directives( + supergraph_schema: &FederationSchema, + subgraphs: &mut FederationSubgraphs, + graph_enum_value_name_to_subgraph_name: &IndexMap>, +) -> Result<(), FederationError> { + let join_directives = match supergraph_schema + .referencers() + .get_directive(JOIN_DIRECTIVE) + { + Ok(directives) => directives, + Err(_) => { + // No join directives found, nothing to do. + return Ok(()); + } + }; + + if let Some(schema_def_pos) = &join_directives.schema { + let schema_def = schema_def_pos.get(supergraph_schema.schema()); + let directives = schema_def + .directives + .iter() + .filter_map(|d| { + if d.name == JOIN_DIRECTIVE { + Some(join_directive_to_real_directive(d)) + } else { + None + } + }) + .collect_vec(); + + // TODO: Do we need to handle the link directive being renamed? + let (links, others) = directives + .into_iter() + .partition::, _>(|(d, _)| d.name == DEFAULT_LINK_NAME); + + // After adding links, we'll check the link against a safelist of + // specs and check_or_add the spec definitions if necessary. + for (link_directive, subgraph_enum_values) in links { + for subgraph_enum_value in subgraph_enum_values { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + &subgraph_enum_value, + )?; + + schema_def_pos.insert_directive( + &mut subgraph.schema, + Component::new(link_directive.clone()), + )?; + + // TODO: add imported definitions from relevant specs + } + } + + // Other directives are added normally. 
+ for (directive, subgraph_enum_values) in others { + for subgraph_enum_value in subgraph_enum_values { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + &subgraph_enum_value, + )?; + + schema_def_pos + .insert_directive(&mut subgraph.schema, Component::new(directive.clone()))?; + } + } + } + + for object_field_pos in &join_directives.object_fields { + let object_field = object_field_pos.get(supergraph_schema.schema())?; + let directives = object_field + .directives + .iter() + .filter_map(|d| { + if d.name == JOIN_DIRECTIVE { + Some(join_directive_to_real_directive(d)) + } else { + None + } + }) + .collect_vec(); + + for (directive, subgraph_enum_values) in directives { + for subgraph_enum_value in subgraph_enum_values { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + &subgraph_enum_value, + )?; + + object_field_pos + .insert_directive(&mut subgraph.schema, Node::new(directive.clone()))?; + } + } + } + + // TODO + // - join_directives.directive_arguments + // - join_directives.enum_types + // - join_directives.enum_values + // - join_directives.input_object_fields + // - join_directives.input_object_types + // - join_directives.interface_field_arguments + // - join_directives.interface_fields + // - join_directives.interface_types + // - join_directives.object_field_arguments + // - join_directives.object_types + // - join_directives.scalar_types + // - join_directives.union_types + + Ok(()) +} + +fn join_directive_to_real_directive(directive: &Node) -> (Directive, Vec) { + let subgraph_enum_values = directive + .argument_by_name("graphs") + .and_then(|arg| arg.as_list()) + .map(|list| { + list.iter() + .map(|node| { + Name::new( + node.as_enum() + .expect("join__directive(graphs:) value is an enum") + .as_str(), + ) + .expect("join__directive(graphs:) value is a valid name") + }) + .collect() + }) + .expect("join__directive(graphs:) missing"); + + let name = directive + 
.argument_by_name("name") + .expect("join__directive(name:) is present") + .as_str() + .expect("join__directive(name:) is a string"); + + let arguments = directive + .argument_by_name("args") + .and_then(|a| a.as_object()) + .map(|args| { + args.iter() + .map(|(k, v)| { + Argument { + name: k.clone(), + value: v.clone(), + } + .into() + }) + .collect() + }) + .unwrap_or_default(); + + let directive = Directive { + name: Name::new(name).expect("join__directive(name:) is a valid name"), + arguments, + }; + + (directive, subgraph_enum_values) +} + #[cfg(test)] mod tests { use apollo_compiler::name; use apollo_compiler::Schema; + use insta::assert_snapshot; use crate::schema::FederationSchema; use crate::ValidFederationSubgraphs; @@ -2709,4 +2890,79 @@ mod tests { let user_type = subgraph.schema.schema().get_object("User"); assert!(user_type.is_none()); } + + #[test] + fn test_join_directives() { + let supergraph = r###"schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @join__directive(graphs: [SUBGRAPH], name: "link", args: {url: "https://specs.apollo.dev/hello/v0.1", import: ["@hello"]}) + { + query: Query + } + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) 
repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPH @join__graph(name: "subgraph", url: "none") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + type Query + @join__type(graph: SUBGRAPH) + { + f: String + } + "###; + + let schema = Schema::parse(supergraph, "supergraph.graphql").unwrap(); + let ValidFederationSubgraphs { subgraphs } = super::extract_subgraphs_from_supergraph( + &FederationSchema::new(schema).unwrap(), + Some(true), + ) + .unwrap(); + + let subgraph = subgraphs.get("subgraph").unwrap(); + assert_snapshot!(subgraph.schema.schema().schema_definition.directives, @r###" @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.5") @link(url: "https://specs.apollo.dev/hello/v0.1", import: ["@hello"])"###); + } } diff --git a/apollo-federation/src/subgraph/mod.rs b/apollo-federation/src/subgraph/mod.rs index 959d731ec3..4ba138ba8f 100644 --- a/apollo-federation/src/subgraph/mod.rs +++ b/apollo-federation/src/subgraph/mod.rs @@ -28,6 +28,7 @@ use crate::subgraph::spec::FEDERATION_V2_DIRECTIVE_NAMES; use crate::subgraph::spec::KEY_DIRECTIVE_NAME; use 
crate::subgraph::spec::SERVICE_SDL_QUERY; use crate::subgraph::spec::SERVICE_TYPE; +use crate::ValidFederationSubgraph; mod database; pub mod spec; @@ -327,6 +328,16 @@ impl std::fmt::Debug for ValidSubgraph { } } +impl From for ValidSubgraph { + fn from(value: ValidFederationSubgraph) -> Self { + Self { + name: value.name, + url: value.url, + schema: value.schema.schema().clone(), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/apollo-federation/src/subgraph/spec.rs b/apollo-federation/src/subgraph/spec.rs index 9c3aa1ebd7..e5fd352a8c 100644 --- a/apollo-federation/src/subgraph/spec.rs +++ b/apollo-federation/src/subgraph/spec.rs @@ -48,6 +48,8 @@ pub const PROVIDES_DIRECTIVE_NAME: Name = name!("provides"); pub const REQUIRES_DIRECTIVE_NAME: Name = name!("requires"); pub const SHAREABLE_DIRECTIVE_NAME: Name = name!("shareable"); pub const TAG_DIRECTIVE_NAME: Name = name!("tag"); +pub const CONTEXT_DIRECTIVE_NAME: Name = name!("context"); +pub const FROM_CONTEXT_DIRECTIVE_NAME: Name = name!("fromContext"); pub const FIELDSET_SCALAR_NAME: Name = name!("FieldSet"); // federated types From 643e2e8b084ab265a265ab21dd616589dc60008a Mon Sep 17 00:00:00 2001 From: Jon Christiansen <467023+theJC@users.noreply.github.com> Date: Sat, 27 Jul 2024 00:19:34 -0500 Subject: [PATCH 013/108] Update comment per PR comment suggestion --- helm/chart/router/values.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml index 01045bd90a..c7f34aaf06 100644 --- a/helm/chart/router/values.yaml +++ b/helm/chart/router/values.yaml @@ -218,11 +218,9 @@ autoscaling: # type: cpu # targetUtilizationPercentage: 75 - +# -- Sets the [rolling update strategy parameters](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment). Can take absolute values or % values. rollingUpdate: {} -# Adjust rolling update strategy. Can take absolute values or % values. 
-# (https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment) # Defaults if not set are: # maxUnavailable: 25% # maxSurge: 25% From 83277755d835422a0fb0139ae5a41be6d67d48e2 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Mon, 29 Jul 2024 10:31:32 +0200 Subject: [PATCH 014/108] Evaluate selectors in response stage when possible (#5725) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Bryn Cooke --- ...x_bnjjj_fix_subgraph_selector_for_event.md | 20 +++++ .../plugins/telemetry/config_new/events.rs | 1 + .../plugins/telemetry/config_new/selectors.rs | 85 +++++++++++++++++++ .../testdata/custom_events.router.yaml | 10 ++- 4 files changed, 113 insertions(+), 3 deletions(-) create mode 100644 .changesets/fix_bnjjj_fix_subgraph_selector_for_event.md diff --git a/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md b/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md new file mode 100644 index 0000000000..db47321d20 --- /dev/null +++ b/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md @@ -0,0 +1,20 @@ +### Evaluate selectors in response stage when possible ([PR #5725](https://github.com/apollographql/router/pull/5725)) + +As `events` are triggered at a specific event (`request`|`response`|`error`) we can only have condition for the related event, but sometimes selectors that can be applied at several events (like `subgraph_name` to get the subgraph name). Adds support for various supergraph selectors on response events. + +Example of an event to log the raw subgraph response only on a subgraph named `products`, this was not working before. 
+ +```yaml +telemetry: + instrumentation: + events: + subgraph: + response: + level: info + condition: + eq: + - subgraph_name: true + - "products" +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5725 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/config_new/events.rs b/apollo-router/src/plugins/telemetry/config_new/events.rs index c68dea7f0d..bf471dc019 100644 --- a/apollo-router/src/plugins/telemetry/config_new/events.rs +++ b/apollo-router/src/plugins/telemetry/config_new/events.rs @@ -1006,6 +1006,7 @@ mod tests { subgraph::Response::fake2_builder() .header("custom-header", "val1") .header("x-log-response", HeaderValue::from_static("log")) + .subgraph_name("subgraph") .data(serde_json::json!({"data": "res"}).to_string()) .build() .expect("expecting valid response") diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs index 3f8cde2faf..9b373f8ac9 100644 --- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs @@ -1357,6 +1357,39 @@ impl Selector for SubgraphSelector { .canonical_reason() .map(|reason| reason.into()), }, + SubgraphSelector::SubgraphOperationKind { .. } => response + .context + .get::<_, String>(OPERATION_KIND) + .ok() + .flatten() + .map(opentelemetry::Value::from), + SubgraphSelector::SupergraphOperationKind { .. } => response + .context + .get::<_, String>(OPERATION_KIND) + .ok() + .flatten() + .map(opentelemetry::Value::from), + SubgraphSelector::SupergraphOperationName { + supergraph_operation_name, + default, + .. 
+ } => { + let op_name = response.context.get(OPERATION_NAME).ok().flatten(); + match supergraph_operation_name { + OperationName::String => op_name.or_else(|| default.clone()), + OperationName::Hash => op_name.or_else(|| default.clone()).map(|op_name| { + let mut hasher = sha2::Sha256::new(); + hasher.update(op_name.as_bytes()); + let result = hasher.finalize(); + hex::encode(result) + }), + } + .map(opentelemetry::Value::from) + } + SubgraphSelector::SubgraphName { subgraph_name } if *subgraph_name => response + .subgraph_name + .clone() + .map(opentelemetry::Value::from), SubgraphSelector::SubgraphResponseBody { subgraph_response_body, default, @@ -1452,6 +1485,33 @@ impl Selector for SubgraphSelector { fn on_error(&self, error: &tower::BoxError, ctx: &Context) -> Option { match self { + SubgraphSelector::SubgraphOperationKind { .. } => ctx + .get::<_, String>(OPERATION_KIND) + .ok() + .flatten() + .map(opentelemetry::Value::from), + SubgraphSelector::SupergraphOperationKind { .. } => ctx + .get::<_, String>(OPERATION_KIND) + .ok() + .flatten() + .map(opentelemetry::Value::from), + SubgraphSelector::SupergraphOperationName { + supergraph_operation_name, + default, + .. + } => { + let op_name = ctx.get(OPERATION_NAME).ok().flatten(); + match supergraph_operation_name { + OperationName::String => op_name.or_else(|| default.clone()), + OperationName::Hash => op_name.or_else(|| default.clone()).map(|op_name| { + let mut hasher = sha2::Sha256::new(); + hasher.update(op_name.as_bytes()); + let result = hasher.finalize(); + hex::encode(result) + }), + } + .map(opentelemetry::Value::from) + } SubgraphSelector::Error { .. 
} => Some(error.to_string().into()), SubgraphSelector::Static(val) => Some(val.clone().into()), SubgraphSelector::StaticField { r#static } => Some(r#static.clone().into()), @@ -2532,6 +2592,14 @@ mod test { assert_eq!( selector.on_request( &crate::services::SubgraphRequest::fake_builder() + .context(context.clone()) + .build(), + ), + Some("query".into()) + ); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() .context(context) .build(), ), @@ -2548,6 +2616,15 @@ mod test { assert_eq!( selector.on_request( &crate::services::SubgraphRequest::fake_builder() + .context(context.clone()) + .subgraph_name("test".to_string()) + .build(), + ), + Some("test".into()) + ); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() .context(context) .subgraph_name("test".to_string()) .build(), @@ -2683,6 +2760,14 @@ mod test { assert_eq!( selector.on_request( &crate::services::SubgraphRequest::fake_builder() + .context(context.clone()) + .build(), + ), + Some("topProducts".into()) + ); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() .context(context) .build(), ), diff --git a/apollo-router/src/plugins/telemetry/testdata/custom_events.router.yaml b/apollo-router/src/plugins/telemetry/testdata/custom_events.router.yaml index 1edfcd3abe..c3c23cb68f 100644 --- a/apollo-router/src/plugins/telemetry/testdata/custom_events.router.yaml +++ b/apollo-router/src/plugins/telemetry/testdata/custom_events.router.yaml @@ -116,9 +116,13 @@ telemetry: response: level: warn condition: - eq: - - subgraph_response_header: x-log-response - - "log" + all: + - eq: + - subgraph_response_header: x-log-response + - "log" + - eq: + - subgraph_name: true + - "subgraph" error: error # Custom events From d6ef11713194b861642b63890ba76de03d0335c1 Mon Sep 17 00:00:00 2001 From: Ahsan Naveed Date: Mon, 29 Jul 2024 05:05:42 -0700 Subject: [PATCH 015/108] Remove unnecessary cache warmup message 
displayed on router startup (#5617) Co-authored-by: ahsan-naveed Co-authored-by: Bryn Cooke --- .../src/query_planner/caching_query_planner.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index db4923f17c..9d063e1652 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -230,10 +230,13 @@ where } else { cache_keys.len() }; - tracing::info!( - "warming up the query plan cache with {} queries, this might take a while", - capacity - ); + + if capacity > 0 { + tracing::info!( + "warming up the query plan cache with {} queries, this might take a while", + capacity + ); + } // persisted queries are added first because they should get a lower priority in the LRU cache, // since a lot of them may be there to support old clients From 6c727c4a94456e04aad4c23e94f518b97aa72293 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Mon, 29 Jul 2024 13:20:03 +0100 Subject: [PATCH 016/108] Optimize span metrics (#5517) Co-authored-by: bryn Co-authored-by: Coenen Benjamin --- apollo-router/src/metrics/mod.rs | 269 +++++++++--------- .../metrics/span_metrics_exporter.rs | 38 ++- 2 files changed, 167 insertions(+), 140 deletions(-) diff --git a/apollo-router/src/metrics/mod.rs b/apollo-router/src/metrics/mod.rs index 2e0fbf2ca5..e24317cd06 100644 --- a/apollo-router/src/metrics/mod.rs +++ b/apollo-router/src/metrics/mod.rs @@ -329,12 +329,12 @@ pub(crate) mod test_utils { } } - #[derive(Serialize, Eq, PartialEq, Default)] + #[derive(Clone, Serialize, Eq, PartialEq, Default)] pub(crate) struct SerdeMetricData { pub(crate) datapoints: Vec, } - #[derive(Serialize, Eq, PartialEq)] + #[derive(Clone, Serialize, Eq, PartialEq)] pub(crate) struct SerdeMetricDataPoint { #[serde(skip_serializing_if = "Option::is_none")] pub(crate) value: Option, @@ -421,14 +421,14 @@ 
pub(crate) mod test_utils { attributes: value .attributes .iter() - .map(|(k, v)| (k.as_str().to_string(), Self::to_value(v))) + .map(|(k, v)| (k.as_str().to_string(), Self::convert(v))) .collect(), } } } impl SerdeMetricDataPoint { - pub(crate) fn to_value(v: &Value) -> serde_json::Value { + pub(crate) fn convert(v: &Value) -> serde_json::Value { match v.clone() { Value::Bool(v) => v.into(), Value::I64(v) => v.into(), @@ -455,7 +455,7 @@ pub(crate) mod test_utils { attributes: value .attributes .iter() - .map(|(k, v)| (k.as_str().to_string(), Self::to_value(v))) + .map(|(k, v)| (k.as_str().to_string(), Self::convert(v))) .collect(), } } @@ -509,23 +509,23 @@ pub(crate) fn meter_provider() -> AggregateMeterProvider { #[allow(unused_macros)] macro_rules! u64_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(u64, counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(u64, counter, add, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(u64, counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(u64, counter, add, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(u64, counter, add, $name, $description, $value, &attributes); + let attributes = 
[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(u64, counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(u64, counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(u64, counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -550,23 +550,23 @@ macro_rules! u64_counter { #[allow(unused_macros)] macro_rules! f64_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, counter, add, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, counter, add, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, counter, add, $name, $description, $value, &attributes); + let attributes = 
[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { metric!(f64, counter, add, $name, $description, $value, $attrs); @@ -591,23 +591,23 @@ macro_rules! f64_counter { #[allow(unused_macros)] macro_rules! i64_up_down_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(i64, up_down_counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(i64, up_down_counter, add, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(i64, up_down_counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(i64, up_down_counter, add, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(i64, 
up_down_counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(i64, up_down_counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(i64, up_down_counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(i64, up_down_counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -632,23 +632,23 @@ macro_rules! i64_up_down_counter { #[allow(unused_macros)] macro_rules! f64_up_down_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, up_down_counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, up_down_counter, add, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, up_down_counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, up_down_counter, add, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = 
vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, up_down_counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, up_down_counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, up_down_counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, up_down_counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -673,23 +673,23 @@ macro_rules! f64_up_down_counter { #[allow(unused_macros)] macro_rules! f64_histogram { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => 
{ - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -714,23 +714,23 @@ macro_rules! f64_histogram { #[allow(unused_macros)] macro_rules! u64_histogram { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(u64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(u64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(u64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(u64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) 
=> { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(u64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(u64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(u64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(u64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -755,23 +755,23 @@ macro_rules! u64_histogram { #[allow(unused_macros)] macro_rules! i64_histogram { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(i64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(i64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(i64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(i64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = 
$attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(i64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(i64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(i64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(i64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -855,7 +855,7 @@ macro_rules! assert_metric { description: "".to_string(), unit: "".to_string(), data: crate::metrics::test_utils::SerdeMetricData { - datapoints: vec![crate::metrics::test_utils::SerdeMetricDataPoint { + datapoints: [crate::metrics::test_utils::SerdeMetricDataPoint { value: $value, sum: $sum, attributes: $attrs @@ -863,13 +863,14 @@ macro_rules! assert_metric { .map(|kv: &opentelemetry::KeyValue| { ( kv.key.to_string(), - crate::metrics::test_utils::SerdeMetricDataPoint::to_value( + crate::metrics::test_utils::SerdeMetricDataPoint::convert( &kv.value, ), ) }) - .collect(), - }], + .collect::>(), + }] + .to_vec(), }, }; panic!( @@ -885,28 +886,28 @@ macro_rules! assert_metric { macro_rules! 
assert_counter { ($($name:ident).+, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { let name = stringify!($($name).+); - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(name, crate::metrics::test_utils::MetricType::Counter, $value, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(name, crate::metrics::test_utils::MetricType::Counter, $value, attributes); assert_metric!(result, name, Some($value.into()), None, &attributes); }; ($($name:ident).+, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { let name = stringify!($($name).+); - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(name, crate::metrics::test_utils::MetricType::Counter, $value, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(name, crate::metrics::test_utils::MetricType::Counter, $value, attributes); assert_metric!(result, name, Some($value.into()), None, &attributes); }; ($name:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, attributes); assert_metric!(result, $name, Some($value.into()), None, &attributes); }; ($name:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = 
vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, &attributes); }; ($name:literal, $value: expr) => { @@ -919,27 +920,27 @@ macro_rules! assert_counter { macro_rules! assert_up_down_counter { ($($name:ident).+, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::UpDownCounter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::UpDownCounter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($($name:ident).+, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::UpDownCounter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = 
crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::UpDownCounter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::UpDownCounter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::UpDownCounter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::UpDownCounter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::UpDownCounter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr) => { @@ -952,27 +953,27 @@ macro_rules! assert_up_down_counter { macro_rules! 
assert_gauge { ($($name:ident).+, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Gauge, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Gauge, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($($name:ident).+, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Gauge, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Gauge, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Gauge, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Gauge, $value, 
attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Gauge, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Gauge, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr) => { @@ -985,27 +986,27 @@ macro_rules! assert_gauge { macro_rules! assert_histogram_sum { ($($name:ident).+, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, $value, &attributes); - assert_metric!(result, $name, None, Some($value.into()), &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, $value, attributes); + assert_metric!(result, $name, None, Some($value.into()), attributes); }; ($($name:ident).+, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, $value, &attributes); - assert_metric!(result, $name, None, Some($value.into()), 
&attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, $value, attributes); + assert_metric!(result, $name, None, Some($value.into()), attributes); }; ($name:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Histogram, $value, &attributes); - assert_metric!(result, $name, None, Some($value.into()), &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Histogram, $value, attributes); + assert_metric!(result, $name, None, Some($value.into()), attributes); }; ($name:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Histogram, $value, &attributes); - assert_metric!(result, $name, None, Some($value.into()), &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Histogram, $value, attributes); + assert_metric!(result, $name, None, Some($value.into()), attributes); }; ($name:literal, $value: expr) => { @@ -1018,27 +1019,27 @@ macro_rules! assert_histogram_sum { macro_rules! 
assert_histogram_exists { ($($name:ident).+, $value: ty, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(result, $name, None, None, attributes); }; ($($name:ident).+, $value: ty, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(result, $name, None, None, attributes); }; ($name:literal, $value: ty, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, 
crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(result, $name, None, None, attributes); }; ($name:literal, $value: ty, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(result, $name, None, None, attributes); }; ($name:literal, $value: ty) => { @@ -1051,27 +1052,27 @@ macro_rules! assert_histogram_exists { macro_rules! assert_histogram_not_exists { ($($name:ident).+, $value: ty, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(!result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(!result, $name, None, None, attributes); }; ($($name:ident).+, $value: ty, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, &attributes); - 
assert_metric!(!result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(!result, $name, None, None, attributes); }; ($name:literal, $value: ty, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(!result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(!result, $name, None, None, attributes); }; ($name:literal, $value: ty, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(!result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(!result, $name, None, None, attributes); }; ($name:literal, $value: ty) => { diff --git a/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs b/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs index a5869daad5..6129b6b9e4 100644 --- 
a/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs +++ b/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs @@ -1,6 +1,8 @@ use std::collections::HashSet; use std::time::Instant; +use opentelemetry_api::KeyValue; +use opentelemetry_api::Value; use tracing_core::field::Visit; use tracing_core::span; use tracing_core::Field; @@ -70,14 +72,15 @@ where let idle: f64 = timings.idle as f64 / 1_000_000_000_f64; let busy: f64 = timings.busy as f64 / 1_000_000_000_f64; let name = span.metadata().name(); + if let Some(subgraph_name) = timings.subgraph.take() { - ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %name, subgraph = %subgraph_name); - ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %name, subgraph = %subgraph_name); - ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %name, subgraph = %subgraph_name); + record(duration, "duration", name, Some(&subgraph_name)); + record(duration, "idle", name, Some(&subgraph_name)); + record(duration, "busy", name, Some(&subgraph_name)); } else { - ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %name); - ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %name); - ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %name); + record(duration, "duration", name, None); + record(idle, "idle", name, None); + record(busy, "busy", name, None); } } } @@ -105,6 +108,29 @@ where } } +fn record(duration: f64, kind: &'static str, name: &str, subgraph_name: Option<&str>) { + // Avoid a heap allocation for a vec by using a slice + let attrs = [ + KeyValue::new("kind", kind), + KeyValue::new("span", Value::String(name.to_string().into())), + KeyValue::new( + "subgraph", + Value::String( + subgraph_name + .map(|s| s.to_string().into()) + .unwrap_or_else(|| "".into()), + ), + ), + ]; + let splice = if subgraph_name.is_some() { + 
&attrs + } else { + &attrs[0..2] + }; + + f64_histogram!("apollo_router_span", "Duration of span", duration, splice); +} + struct Timings { idle: i64, busy: i64, From 961d63f673a651a27fe74a08f34b5834c3687432 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Tue, 30 Jul 2024 09:07:14 +0200 Subject: [PATCH 017/108] fix(federation): prevent reuse_fragments() from introducing new variable references (#5730) --- apollo-federation/src/operation/mod.rs | 82 +++---- apollo-federation/src/operation/optimize.rs | 229 ++++++++++++++---- apollo-federation/src/operation/simplify.rs | 6 +- apollo-federation/src/operation/tests/mod.rs | 2 - .../src/query_plan/fetch_dependency_graph.rs | 6 +- 5 files changed, 222 insertions(+), 103 deletions(-) diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index b04cdefeb3..b2aa7c3a5b 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -4034,96 +4034,86 @@ fn collect_variables_from_directive<'selection>( variables: &mut HashSet<&'selection Name>, ) { for arg in directive.arguments.iter() { - collect_variables_from_value(&arg.value, variables) + collect_variables_from_value(&arg.value, variables); } } impl Field { fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { for arg in self.arguments.iter() { - collect_variables_from_value(&arg.value, variables) + collect_variables_from_value(&arg.value, variables); } for dir in self.directives.iter() { - collect_variables_from_directive(dir, variables) + collect_variables_from_directive(dir, variables); } } } impl FieldSelection { - /// # Errors - /// Returns an error if the selection contains a named fragment spread. 
- fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { + fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { self.field.collect_variables(variables); if let Some(set) = &self.selection_set { - set.collect_variables(variables)? + set.collect_variables(variables); } - Ok(()) } } impl InlineFragment { fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { for dir in self.directives.iter() { - collect_variables_from_directive(dir, variables) + collect_variables_from_directive(dir, variables); } } } impl InlineFragmentSelection { - /// # Errors - /// Returns an error if the selection contains a named fragment spread. - fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { + fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { self.inline_fragment.collect_variables(variables); - self.selection_set.collect_variables(variables) + self.selection_set.collect_variables(variables); + } +} + +impl FragmentSpread { + fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { + for dir in self.directives.iter() { + collect_variables_from_directive(dir, variables); + } + for dir in self.fragment_directives.iter() { + collect_variables_from_directive(dir, variables); + } + } +} + +impl FragmentSpreadSelection { + fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { + self.spread.collect_variables(variables); + self.selection_set.collect_variables(variables); } } impl Selection { - /// # Errors - /// Returns an error if the selection contains a named fragment spread. 
- fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { + fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { match self { Selection::Field(field) => field.collect_variables(variables), - Selection::InlineFragment(inline_fragment) => { - inline_fragment.collect_variables(variables) - } - Selection::FragmentSpread(_) => Err(FederationError::internal( - "collect_variables(): unexpected fragment spread", - )), + Selection::InlineFragment(frag) => frag.collect_variables(variables), + Selection::FragmentSpread(frag) => frag.collect_variables(variables), } } } impl SelectionSet { - /// Returns the variable names that are used by this selection set. - /// - /// # Errors - /// Returns an error if the selection set contains a named fragment spread. - pub(crate) fn used_variables(&self) -> Result, FederationError> { + /// Returns the variable names that are used by this selection set, including through fragment + /// spreads. + pub(crate) fn used_variables(&self) -> HashSet<&'_ Name> { let mut variables = HashSet::new(); - self.collect_variables(&mut variables)?; - Ok(variables) + self.collect_variables(&mut variables); + variables } - /// # Errors - /// Returns an error if the selection set contains a named fragment spread. - fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { + fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { for selection in self.selections.values() { - selection.collect_variables(variables)? 
+ selection.collect_variables(variables); } - Ok(()) } } diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index 12d1642158..3f21309a10 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -37,10 +37,10 @@ use std::collections::HashMap; use std::collections::HashSet; -use std::ops::Not; use std::sync::Arc; use apollo_compiler::executable; +use apollo_compiler::executable::VariableDefinition; use apollo_compiler::Name; use apollo_compiler::Node; @@ -61,6 +61,32 @@ use super::SelectionSet; use crate::error::FederationError; use crate::schema::position::CompositeTypeDefinitionPosition; +#[derive(Debug)] +struct ReuseContext<'a> { + fragments: &'a NamedFragments, + operation_variables: Option>, +} + +impl<'a> ReuseContext<'a> { + fn for_fragments(fragments: &'a NamedFragments) -> Self { + Self { + fragments, + operation_variables: None, + } + } + + // Taking two separate parameters so the caller can still mutate the operation's selection set. + fn for_operation( + fragments: &'a NamedFragments, + operation_variables: &'a [Node], + ) -> Self { + Self { + fragments, + operation_variables: Some(operation_variables.iter().map(|var| &var.name).collect()), + } + } +} + //============================================================================= // Add __typename field for abstract types in named fragment definitions @@ -86,7 +112,7 @@ impl NamedFragments { )?; let mut mapped_selection_set = mapper(&expanded_selection_set)?; // `mapped_selection_set` must be fragment-spread-free. 
- mapped_selection_set.reuse_fragments(&result)?; + mapped_selection_set.reuse_fragments(&ReuseContext::for_fragments(&result))?; let updated = Fragment { selection_set: mapped_selection_set, schema: fragment.schema.clone(), @@ -665,7 +691,7 @@ impl Fragment { ty: &CompositeTypeDefinitionPosition, ) -> Result { let expanded_selection_set = self.selection_set.expand_all_fragments()?; - let normalized_selection_set = expanded_selection_set.flatten_unnecessary_fragments( + let selection_set = expanded_selection_set.flatten_unnecessary_fragments( ty, /*named_fragments*/ &Default::default(), &self.schema, @@ -677,7 +703,7 @@ impl Fragment { // Thus, we have to use the full validator in this case. (see // https://github.com/graphql/graphql-spec/issues/1085 for details.) return Ok(FragmentRestrictionAtType::new( - normalized_selection_set.clone(), + selection_set.clone(), Some(FieldsConflictValidator::from_selection_set( &expanded_selection_set, )), @@ -693,13 +719,11 @@ impl Fragment { // validator because we know the non-trimmed parts cannot create field conflict issues so // we're trying to build a smaller validator, but it's ok if trimmed is not as small as it // theoretically can be. - let trimmed = expanded_selection_set.minus(&normalized_selection_set)?; - let validator = trimmed - .is_empty() - .not() - .then(|| FieldsConflictValidator::from_selection_set(&trimmed)); + let trimmed = expanded_selection_set.minus(&selection_set)?; + let validator = + (!trimmed.is_empty()).then(|| FieldsConflictValidator::from_selection_set(&trimmed)); Ok(FragmentRestrictionAtType::new( - normalized_selection_set.clone(), + selection_set.clone(), validator, )) } @@ -782,7 +806,8 @@ enum SelectionSetOrFragment { } impl SelectionSet { - /// Reduce the list of applicable fragments by eliminating ones that are subsumed by another. + /// Reduce the list of applicable fragments by eliminating fragments that directly include + /// another fragment. 
// // We have found the list of fragments that applies to some subset of sub-selection. In // general, we want to now produce the selection set with spread for those fragments plus @@ -861,7 +886,7 @@ impl SelectionSet { fn try_apply_fragments( &self, parent_type: &CompositeTypeDefinitionPosition, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, full_match_condition: FullMatchingFragmentCondition, @@ -873,7 +898,9 @@ impl SelectionSet { // fragment whose type _is_ the fragment condition (at which point, this // `can_apply_directly_at_type` method will apply. Also note that this is because we have // this restriction that calling `expanded_selection_set_at_type` is ok. - let candidates = fragments.get_all_may_apply_directly_at_type(parent_type); + let candidates = context + .fragments + .get_all_may_apply_directly_at_type(parent_type); // First, we check which of the candidates do apply inside the selection set, if any. If we // find a candidate that applies to the whole selection set, then we stop and only return @@ -888,6 +915,27 @@ impl SelectionSet { continue; } + // I don't love this, but fragments may introduce new fields to the operation, including + // fields that use variables that are not declared in the operation. There are two ways + // to work around this: adjusting the fragments so they only list the fields that we + // actually need, or excluding fragments that introduce variable references from reuse. + // The former would be ideal, as we would not execute more fields than required. It's + // also much trickier to do. The latter fixes this particular issue but leaves the + // output in a less than ideal state. + // The consideration here is: `generate_query_fragments` has significant advantages + // over fragment reuse, and so we do not want to invest a lot of time into improving + // fragment reuse. 
We do the simple, less-than-ideal thing. + if let Some(variable_definitions) = &context.operation_variables { + let fragment_variables = candidate.selection_set.used_variables(); + if fragment_variables + .difference(variable_definitions) + .next() + .is_some() + { + continue; + } + } + // As we check inclusion, we ignore the case where the fragment queries __typename // but the `self` does not. The rational is that querying `__typename` // unnecessarily is mostly harmless (it always works and it's super cheap) so we @@ -1220,17 +1268,17 @@ impl NamedFragments { impl Selection { fn reuse_fragments_inner( &self, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, ) -> Result { match self { Selection::Field(field) => Ok(field - .reuse_fragments_inner(fragments, validator, fragments_at_type)? + .reuse_fragments_inner(context, validator, fragments_at_type)? .into()), Selection::FragmentSpread(_) => Ok(self.clone()), // Do nothing Selection::InlineFragment(inline_fragment) => Ok(inline_fragment - .reuse_fragments_inner(fragments, validator, fragments_at_type)? + .reuse_fragments_inner(context, validator, fragments_at_type)? .into()), } } @@ -1239,7 +1287,7 @@ impl Selection { impl FieldSelection { fn reuse_fragments_inner( &self, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, ) -> Result { @@ -1257,28 +1305,24 @@ impl FieldSelection { // First, see if we can reuse fragments for the selection of this field. 
let opt = selection_set.try_apply_fragments( &base_composite_type, - fragments, + context, &mut field_validator, fragments_at_type, FullMatchingFragmentCondition::ForFieldSelection, )?; - let mut optimized; - match opt { + let mut optimized = match opt { SelectionSetOrFragment::Fragment(fragment) => { let fragment_selection = FragmentSpreadSelection::from_fragment( &fragment, /*directives*/ &Default::default(), ); - optimized = - SelectionSet::from_selection(base_composite_type, fragment_selection.into()); - } - SelectionSetOrFragment::SelectionSet(selection_set) => { - optimized = selection_set; + SelectionSet::from_selection(base_composite_type, fragment_selection.into()) } - } + SelectionSetOrFragment::SelectionSet(selection_set) => selection_set, + }; optimized = - optimized.reuse_fragments_inner(fragments, &mut field_validator, fragments_at_type)?; + optimized.reuse_fragments_inner(context, &mut field_validator, fragments_at_type)?; Ok(self.with_updated_selection_set(Some(optimized))) } } @@ -1303,17 +1347,17 @@ impl From for Selection { impl InlineFragmentSelection { fn reuse_fragments_inner( &self, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, ) -> Result { - let mut optimized = self.selection_set.clone(); + let optimized; let type_condition_position = &self.inline_fragment.type_condition_position; if let Some(type_condition_position) = type_condition_position { let opt = self.selection_set.try_apply_fragments( type_condition_position, - fragments, + context, validator, fragments_at_type, FullMatchingFragmentCondition::ForInlineFragmentSelection { @@ -1358,34 +1402,37 @@ impl InlineFragmentSelection { ) .into(), ); - // fall-through } } SelectionSetOrFragment::SelectionSet(selection_set) => { optimized = selection_set; - // fall-through } } + } else { + optimized = self.selection_set.clone(); } - // Then, recurse inside the field 
sub-selection (note that if we matched some fragments - // above, this recursion will "ignore" those as `FragmentSpreadSelection`'s - // `reuse_fragments()` is a no-op). - optimized = optimized.reuse_fragments_inner(fragments, validator, fragments_at_type)?; - Ok(InlineFragmentSelection::new(self.inline_fragment.clone(), optimized).into()) + Ok(InlineFragmentSelection::new( + self.inline_fragment.clone(), + // Then, recurse inside the field sub-selection (note that if we matched some fragments + // above, this recursion will "ignore" those as `FragmentSpreadSelection`'s + // `reuse_fragments()` is a no-op). + optimized.reuse_fragments_inner(context, validator, fragments_at_type)?, + ) + .into()) } } impl SelectionSet { fn reuse_fragments_inner( &self, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, ) -> Result { - self.lazy_map(fragments, |selection| { + self.lazy_map(context.fragments, |selection| { Ok(selection - .reuse_fragments_inner(fragments, validator, fragments_at_type)? + .reuse_fragments_inner(context, validator, fragments_at_type)? .into()) }) } @@ -1402,16 +1449,16 @@ impl SelectionSet { /// ## Errors /// Returns an error if the selection set contains a named fragment spread. 
- fn reuse_fragments(&mut self, fragments: &NamedFragments) -> Result<(), FederationError> { - if fragments.is_empty() { + fn reuse_fragments(&mut self, context: &ReuseContext<'_>) -> Result<(), FederationError> { + if context.fragments.is_empty() { return Ok(()); } if self.contains_fragment_spread() { - return Err(FederationError::internal("optimize() must only be used on selection sets that do not contain named fragment spreads")); + return Err(FederationError::internal("reuse_fragments() must only be used on selection sets that do not contain named fragment spreads")); } - // Calling optimize() will not match a fragment that would have expanded at + // Calling reuse_fragments() will not match a fragment that would have expanded at // top-level. That is, say we have the selection set `{ x y }` for a top-level `Query`, and // we have a fragment // ``` @@ -1420,12 +1467,12 @@ impl SelectionSet { // y // } // ``` - // then calling `self.optimize(fragments)` would only apply check if F apply to + // then calling `self.reuse_fragments(fragments)` would only apply check if F apply to // `x` and then `y`. // // To ensure the fragment match in this case, we "wrap" the selection into a trivial // fragment of the selection parent, so in the example above, we create selection `... on - // Query { x y}`. With that, `optimize` will correctly match on the `on Query` + // Query { x y }`. With that, `reuse_fragments` will correctly match on the `on Query` // fragment; after which we can unpack the final result. 
let wrapped = InlineFragmentSelection::from_selection_set( self.type_position.clone(), // parent type @@ -1436,7 +1483,7 @@ impl SelectionSet { FieldsConflictValidator::from_selection_set(self), ); let optimized = wrapped.reuse_fragments_inner( - fragments, + context, &mut validator, &mut FragmentRestrictionAtTypeCache::default(), )?; @@ -1456,7 +1503,7 @@ impl SelectionSet { } impl Operation { - // PORT_NOTE: The JS version of `optimize` takes an optional `minUsagesToOptimize` argument. + // PORT_NOTE: The JS version of `reuse_fragments` takes an optional `minUsagesToOptimize` argument. // However, it's only used in tests. So, it's removed in the Rust version. const DEFAULT_MIN_USAGES_TO_OPTIMIZE: u32 = 2; @@ -1473,7 +1520,8 @@ impl Operation { // Optimize the operation's selection set by re-using existing fragments. let before_optimization = self.selection_set.clone(); - self.selection_set.reuse_fragments(fragments)?; + self.selection_set + .reuse_fragments(&ReuseContext::for_operation(fragments, &self.variables))?; if before_optimization == self.selection_set { return Ok(()); } @@ -3148,6 +3196,89 @@ mod tests { "###); } + #[test] + fn reuse_fragments_with_non_intersecting_types() { + let schema = r#" + type Query { + t: T + s: S + s2: S + i: I + } + + interface I { + a: Int + b: Int + } + + type T implements I { + a: Int + b: Int + + c: Int + d: Int + } + type S implements I { + a: Int + b: Int + + f: Int + g: Int + } + "#; + let query = r#" + query A ($if: Boolean!) { + t { ...x } + s { ...x } + i { ...x } + } + query B { + s { + # this matches fragment x once it is flattened, + # because the `...on T` condition does not intersect with our + # current type `S` + __typename + a b + } + s2 { + # same snippet to get it to use the fragment + __typename + a b + } + } + fragment x on I { + __typename + a + b + ... 
on T { c d @include(if: $if) } + } + "#; + let schema = parse_schema(schema); + let query = ExecutableDocument::parse_and_validate(schema.schema(), query, "query.graphql") + .unwrap(); + + let operation_a = + Operation::from_operation_document(schema.clone(), &query, Some("A")).unwrap(); + let operation_b = + Operation::from_operation_document(schema.clone(), &query, Some("B")).unwrap(); + let expanded_b = operation_b.expand_all_fragments_and_normalize().unwrap(); + + assert_optimized!(expanded_b, operation_a.named_fragments, @r###" + query B { + s { + __typename + a + b + } + s2 { + __typename + a + b + } + } + "###); + } + /// /// empty branches removal /// diff --git a/apollo-federation/src/operation/simplify.rs b/apollo-federation/src/operation/simplify.rs index 95ae8ad243..89fb42f110 100644 --- a/apollo-federation/src/operation/simplify.rs +++ b/apollo-federation/src/operation/simplify.rs @@ -158,12 +158,12 @@ impl InlineFragmentSelection { named_fragments: &NamedFragments, schema: &ValidFederationSchema, ) -> Result, FederationError> { - let this_condition = self.inline_fragment.type_condition_position.clone(); + let this_condition = self.inline_fragment.type_condition_position.as_ref(); // This method assumes by contract that `parent_type` runtimes intersects `self.inline_fragment.parent_type_position`'s, // but `parent_type` runtimes may be a subset. So first check if the selection should not be discarded on that account (that // is, we should not keep the selection if its condition runtimes don't intersect at all with those of // `parent_type` as that would ultimately make an invalid selection set). 
- if let Some(ref type_condition) = this_condition { + if let Some(type_condition) = this_condition { if (self.inline_fragment.schema != *schema || self.inline_fragment.parent_type_position != *parent_type) && !runtime_types_intersect(type_condition, parent_type, schema) @@ -182,7 +182,7 @@ impl InlineFragmentSelection { // cannot be restricting things further (it's typically a less precise interface/union). let useless_fragment = match this_condition { None => true, - Some(ref c) => self.inline_fragment.schema == *schema && c == parent_type, + Some(c) => self.inline_fragment.schema == *schema && c == parent_type, }; if useless_fragment || parent_type.is_object_type() { // Try to skip this fragment and flatten_unnecessary_fragments self.selection_set with `parent_type`, diff --git a/apollo-federation/src/operation/tests/mod.rs b/apollo-federation/src/operation/tests/mod.rs index b2081d0bcc..d90760d341 100644 --- a/apollo-federation/src/operation/tests/mod.rs +++ b/apollo-federation/src/operation/tests/mod.rs @@ -1614,7 +1614,6 @@ fn used_variables() { let mut variables = operation .selection_set .used_variables() - .unwrap() .into_iter() .collect::>(); variables.sort(); @@ -1633,7 +1632,6 @@ fn used_variables() { .as_ref() .unwrap() .used_variables() - .unwrap() .into_iter() .collect::>(); variables.sort(); diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index d5425e2187..5fdfcb99d7 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -2347,7 +2347,7 @@ impl FetchDependencyGraphNode { let subgraph_schema = query_graph.schema_by_source(&self.subgraph_name)?; let variable_usages = { - let set = selection.used_variables()?; + let set = selection.used_variables(); let mut list = set.into_iter().cloned().collect::>(); list.sort(); list @@ -2541,7 +2541,7 @@ fn operation_for_entities_fetch( let mut 
variable_definitions: Vec> = Vec::with_capacity(all_variable_definitions.len() + 1); variable_definitions.push(representations_variable_definition(subgraph_schema)?); - let used_variables = selection_set.used_variables()?; + let used_variables = selection_set.used_variables(); variable_definitions.extend( all_variable_definitions .iter() @@ -2624,7 +2624,7 @@ fn operation_for_query_fetch( variable_definitions: &[Node], operation_name: &Option, ) -> Result { - let used_variables = selection_set.used_variables()?; + let used_variables = selection_set.used_variables(); let variable_definitions = variable_definitions .iter() .filter(|definition| used_variables.contains(&definition.name)) From e16c9bf2fbb80cdb0105ecd914d624ab357db3ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Tue, 30 Jul 2024 09:07:24 +0200 Subject: [PATCH 018/108] chore(federation): move selection merging to a module (#5737) --- apollo-federation/src/operation/merging.rs | 398 +++++++++++++++++++++ apollo-federation/src/operation/mod.rs | 340 +----------------- 2 files changed, 400 insertions(+), 338 deletions(-) create mode 100644 apollo-federation/src/operation/merging.rs diff --git a/apollo-federation/src/operation/merging.rs b/apollo-federation/src/operation/merging.rs new file mode 100644 index 0000000000..4c2b31cbd3 --- /dev/null +++ b/apollo-federation/src/operation/merging.rs @@ -0,0 +1,398 @@ +//! Provides methods for recursively merging selections and selection sets. 
+use std::sync::Arc; + +use apollo_compiler::collections::IndexMap; + +use super::selection_map; +use super::FieldSelection; +use super::FieldSelectionValue; +use super::FragmentSpreadSelection; +use super::FragmentSpreadSelectionValue; +use super::HasSelectionKey as _; +use super::InlineFragmentSelection; +use super::InlineFragmentSelectionValue; +use super::NamedFragments; +use super::Selection; +use super::SelectionSet; +use super::SelectionValue; +use crate::error::FederationError; + +impl<'a> FieldSelectionValue<'a> { + /// Merges the given field selections into this one. + /// + /// # Preconditions + /// All selections must have the same selection key (alias + directives). Otherwise + /// this function produces invalid output. + /// + /// # Errors + /// Returns an error if: + /// - The parent type or schema of any selection does not match `self`'s. + /// - Any selection does not select the same field position as `self`. + fn merge_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let self_field = &self.get().field; + let mut selection_sets = vec![]; + for other in others { + let other_field = &other.field; + if other_field.schema != self_field.schema { + return Err(FederationError::internal( + "Cannot merge field selections from different schemas", + )); + } + if other_field.field_position != self_field.field_position { + return Err(FederationError::internal(format!( + "Cannot merge field selection for field \"{}\" into a field selection for field \"{}\"", + other_field.field_position, + self_field.field_position, + ))); + } + if self.get().selection_set.is_some() { + let Some(other_selection_set) = &other.selection_set else { + return Err(FederationError::internal(format!( + "Field \"{}\" has composite type but not a selection set", + other_field.field_position, + ))); + }; + selection_sets.push(other_selection_set); + } else if other.selection_set.is_some() { + return Err(FederationError::internal(format!( + "Field 
\"{}\" has non-composite type but also has a selection set", + other_field.field_position, + ))); + } + } + if let Some(self_selection_set) = self.get_selection_set_mut() { + self_selection_set.merge_into(selection_sets.into_iter())?; + } + Ok(()) + } +} + +impl<'a> InlineFragmentSelectionValue<'a> { + /// Merges the given normalized inline fragment selections into this one. + /// + /// # Preconditions + /// All selections must have the same selection key (directives). Otherwise this function + /// produces invalid output. + /// + /// # Errors + /// Returns an error if the parent type or schema of any selection does not match `self`'s. + fn merge_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let self_inline_fragment = &self.get().inline_fragment; + let mut selection_sets = vec![]; + for other in others { + let other_inline_fragment = &other.inline_fragment; + if other_inline_fragment.schema != self_inline_fragment.schema { + return Err(FederationError::internal( + "Cannot merge inline fragment from different schemas", + )); + } + if other_inline_fragment.parent_type_position + != self_inline_fragment.parent_type_position + { + return Err(FederationError::internal( + format!( + "Cannot merge inline fragment of parent type \"{}\" into an inline fragment of parent type \"{}\"", + other_inline_fragment.parent_type_position, + self_inline_fragment.parent_type_position, + ), + )); + } + selection_sets.push(&other.selection_set); + } + self.get_selection_set_mut() + .merge_into(selection_sets.into_iter())?; + Ok(()) + } +} + +impl<'a> FragmentSpreadSelectionValue<'a> { + /// Merges the given normalized fragment spread selections into this one. + /// + /// # Preconditions + /// All selections must have the same selection key (fragment name + directives). + /// Otherwise this function produces invalid output. + /// + /// # Errors + /// Returns an error if the parent type or schema of any selection does not match `self`'s. 
+ fn merge_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let self_fragment_spread = &self.get().spread; + for other in others { + let other_fragment_spread = &other.spread; + if other_fragment_spread.schema != self_fragment_spread.schema { + return Err(FederationError::internal( + "Cannot merge fragment spread from different schemas", + )); + } + // Nothing to do since the fragment spread is already part of the selection set. + // Fragment spreads are uniquely identified by fragment name and applied directives. + // Since there is already an entry for the same fragment spread, there is no point + // in attempting to merge its sub-selections, as the underlying entry should be + // exactly the same as the currently processed one. + } + Ok(()) + } +} + +impl SelectionSet { + /// NOTE: This is a private API and should be used with care, use `add_selection_set` instead. + /// + /// Merges the given normalized selection sets into this one. + /// + /// # Errors + /// Returns an error if the parent type or schema of any selection does not match `self`'s. + /// + /// Returns an error if any selection contains invalid GraphQL that prevents the merge. + fn merge_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let mut selections_to_merge = vec![]; + for other in others { + if other.schema != self.schema { + return Err(FederationError::internal( + "Cannot merge selection sets from different schemas", + )); + } + if other.type_position != self.type_position { + return Err(FederationError::internal( + format!( + "Cannot merge selection set for type \"{}\" into a selection set for type \"{}\"", + other.type_position, + self.type_position, + ), + )); + } + selections_to_merge.extend(other.selections.values()); + } + self.merge_selections_into(selections_to_merge.into_iter()) + } + + /// NOTE: This is a private API and should be used with care, use `add_selection` instead. 
+ /// + /// A helper function for merging the given selections into this one. + /// + /// # Errors + /// Returns an error if the parent type or schema of any selection does not match `self`'s. + /// + /// Returns an error if any selection contains invalid GraphQL that prevents the merge. + pub(super) fn merge_selections_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let mut fields = IndexMap::default(); + let mut fragment_spreads = IndexMap::default(); + let mut inline_fragments = IndexMap::default(); + let target = Arc::make_mut(&mut self.selections); + for other_selection in others { + let other_key = other_selection.key(); + match target.entry(other_key.clone()) { + selection_map::Entry::Occupied(existing) => match existing.get() { + Selection::Field(self_field_selection) => { + let Selection::Field(other_field_selection) = other_selection else { + return Err(FederationError::internal( + format!( + "Field selection key for field \"{}\" references non-field selection", + self_field_selection.field.field_position, + ), + )); + }; + fields + .entry(other_key) + .or_insert_with(Vec::new) + .push(other_field_selection); + } + Selection::FragmentSpread(self_fragment_spread_selection) => { + let Selection::FragmentSpread(other_fragment_spread_selection) = + other_selection + else { + return Err(FederationError::internal( + format!( + "Fragment spread selection key for fragment \"{}\" references non-field selection", + self_fragment_spread_selection.spread.fragment_name, + ), + )); + }; + fragment_spreads + .entry(other_key) + .or_insert_with(Vec::new) + .push(other_fragment_spread_selection); + } + Selection::InlineFragment(self_inline_fragment_selection) => { + let Selection::InlineFragment(other_inline_fragment_selection) = + other_selection + else { + return Err(FederationError::internal( + format!( + "Inline fragment selection key under parent type \"{}\" {}references non-field selection", + 
self_inline_fragment_selection.inline_fragment.parent_type_position, + self_inline_fragment_selection.inline_fragment.type_condition_position.clone() + .map_or_else( + String::new, + |cond| format!("(type condition: {}) ", cond), + ), + ), + )); + }; + inline_fragments + .entry(other_key) + .or_insert_with(Vec::new) + .push(other_inline_fragment_selection); + } + }, + selection_map::Entry::Vacant(vacant) => { + vacant.insert(other_selection.clone())?; + } + } + } + + for (key, self_selection) in target.iter_mut() { + match self_selection { + SelectionValue::Field(mut self_field_selection) => { + if let Some(other_field_selections) = fields.shift_remove(key) { + self_field_selection.merge_into( + other_field_selections.iter().map(|selection| &***selection), + )?; + } + } + SelectionValue::FragmentSpread(mut self_fragment_spread_selection) => { + if let Some(other_fragment_spread_selections) = + fragment_spreads.shift_remove(key) + { + self_fragment_spread_selection.merge_into( + other_fragment_spread_selections + .iter() + .map(|selection| &***selection), + )?; + } + } + SelectionValue::InlineFragment(mut self_inline_fragment_selection) => { + if let Some(other_inline_fragment_selections) = + inline_fragments.shift_remove(key) + { + self_inline_fragment_selection.merge_into( + other_inline_fragment_selections + .iter() + .map(|selection| &***selection), + )?; + } + } + } + } + + Ok(()) + } + + /// Inserts a `Selection` into the inner map. Should a selection with the same key already + /// exist in the map, the existing selection and the given selection are merged, replacing the + /// + /// existing selection while keeping the same insertion index. + /// + /// # Preconditions + /// The provided selection must have the same schema and type position as `self`. Rebase your + /// selection first if it may not meet that precondition. + /// + /// # Errors + /// Returns an error if either `self` or the selection contain invalid GraphQL that prevents the merge. 
+ pub(crate) fn add_local_selection( + &mut self, + selection: &Selection, + ) -> Result<(), FederationError> { + debug_assert_eq!( + &self.schema, + selection.schema(), + "In order to add selection it needs to point to the same schema" + ); + self.merge_selections_into(std::iter::once(selection)) + } + + /// Inserts a `SelectionSet` into the inner map. Should any sub selection with the same key already + /// exist in the map, the existing selection and the given selection are merged, replacing the + /// existing selection while keeping the same insertion index. + /// + /// # Preconditions + /// The provided selection set must have the same schema and type position as `self`. Use + /// [`SelectionSet::add_selection_set`] if your selection set may not meet that precondition. + /// + /// # Errors + /// Returns an error if either selection set contains invalid GraphQL that prevents the merge. + pub(crate) fn add_local_selection_set( + &mut self, + selection_set: &SelectionSet, + ) -> Result<(), FederationError> { + debug_assert_eq!( + self.schema, selection_set.schema, + "In order to add selection set it needs to point to the same schema." + ); + debug_assert_eq!( + self.type_position, selection_set.type_position, + "In order to add selection set it needs to point to the same type position" + ); + self.merge_into(std::iter::once(selection_set)) + } + + /// Rebase given `SelectionSet` on self and then inserts it into the inner map. Assumes that given + /// selection set does not reference ANY named fragments. If it does, Use `add_selection_set_with_fragments` + /// instead. + /// + /// Should any sub selection with the same key already exist in the map, the existing selection + /// and the given selection are merged, replacing the existing selection while keeping the same + /// insertion index. + /// + /// # Errors + /// Returns an error if either selection set contains invalid GraphQL that prevents the merge. 
+ pub(crate) fn add_selection_set( + &mut self, + selection_set: &SelectionSet, + ) -> Result<(), FederationError> { + self.add_selection_set_with_fragments(selection_set, &Default::default()) + } + + /// Rebase given `SelectionSet` on self with the specified fragments and then inserts it into the + /// inner map. + /// + /// Should any sub selection with the same key already exist in the map, the existing selection + /// and the given selection are merged, replacing the existing selection while keeping the same + /// insertion index. + /// + /// # Errors + /// Returns an error if either selection set contains invalid GraphQL that prevents the merge. + pub(crate) fn add_selection_set_with_fragments( + &mut self, + selection_set: &SelectionSet, + named_fragments: &NamedFragments, + ) -> Result<(), FederationError> { + let rebased = + selection_set.rebase_on(&self.type_position, named_fragments, &self.schema)?; + self.add_local_selection_set(&rebased) + } +} + +/// # Preconditions +/// There must be at least one selection set. +/// The selection sets must all have the same schema and type position. +/// +/// # Errors +/// Returns an error if any selection set contains invalid GraphQL that prevents the merge. 
+pub(crate) fn merge_selection_sets( + mut selection_sets: Vec, +) -> Result { + let Some((first, remainder)) = selection_sets.split_first_mut() else { + return Err(FederationError::internal( + "merge_selection_sets(): must have at least one selection set", + )); + }; + first.merge_into(remainder.iter())?; + + // Take ownership of the first element and discard the rest; + // we can unwrap because `split_first_mut()` guarantees at least one element will be yielded + Ok(selection_sets.into_iter().next().unwrap()) +} diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index b2aa7c3a5b..c2441530a4 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -49,6 +49,7 @@ use crate::schema::position::SchemaRootDefinitionKind; use crate::schema::ValidFederationSchema; mod contains; +mod merging; mod optimize; mod rebase; mod simplify; @@ -56,6 +57,7 @@ mod simplify; mod tests; pub(crate) use contains::*; +pub(crate) use merging::*; pub(crate) use rebase::*; pub(crate) const TYPENAME_FIELD: Name = name!("__typename"); @@ -2105,144 +2107,6 @@ impl SelectionSet { Ok(()) } - /// NOTE: This is a private API and should be used with care, use `add_selection_set` instead. - /// - /// Merges the given normalized selection sets into this one. 
- fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let mut selections_to_merge = vec![]; - for other in others { - if other.schema != self.schema { - return Err(FederationError::internal( - "Cannot merge selection sets from different schemas", - )); - } - if other.type_position != self.type_position { - return Err(FederationError::internal( - format!( - "Cannot merge selection set for type \"{}\" into a selection set for type \"{}\"", - other.type_position, - self.type_position, - ), - )); - } - selections_to_merge.extend(other.selections.values()); - } - self.merge_selections_into(selections_to_merge.into_iter()) - } - - /// NOTE: This is a private API and should be used with care, use `add_selection` instead. - /// - /// A helper function for merging the given selections into this one. - fn merge_selections_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let mut fields = IndexMap::default(); - let mut fragment_spreads = IndexMap::default(); - let mut inline_fragments = IndexMap::default(); - let target = Arc::make_mut(&mut self.selections); - for other_selection in others { - let other_key = other_selection.key(); - match target.entry(other_key.clone()) { - selection_map::Entry::Occupied(existing) => match existing.get() { - Selection::Field(self_field_selection) => { - let Selection::Field(other_field_selection) = other_selection else { - return Err(Internal { - message: format!( - "Field selection key for field \"{}\" references non-field selection", - self_field_selection.field.field_position, - ), - }.into()); - }; - fields - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_field_selection); - } - Selection::FragmentSpread(self_fragment_spread_selection) => { - let Selection::FragmentSpread(other_fragment_spread_selection) = - other_selection - else { - return Err(Internal { - message: format!( - "Fragment spread selection key for fragment \"{}\" references 
non-field selection", - self_fragment_spread_selection.spread.fragment_name, - ), - }.into()); - }; - fragment_spreads - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_fragment_spread_selection); - } - Selection::InlineFragment(self_inline_fragment_selection) => { - let Selection::InlineFragment(other_inline_fragment_selection) = - other_selection - else { - return Err(Internal { - message: format!( - "Inline fragment selection key under parent type \"{}\" {}references non-field selection", - self_inline_fragment_selection.inline_fragment.parent_type_position, - self_inline_fragment_selection.inline_fragment.type_condition_position.clone() - .map_or_else( - String::new, - |cond| format!("(type condition: {}) ", cond), - ), - ), - }.into()); - }; - inline_fragments - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_inline_fragment_selection); - } - }, - selection_map::Entry::Vacant(vacant) => { - vacant.insert(other_selection.clone())?; - } - } - } - - for (key, self_selection) in target.iter_mut() { - match self_selection { - SelectionValue::Field(mut self_field_selection) => { - if let Some(other_field_selections) = fields.shift_remove(key) { - self_field_selection.merge_into( - other_field_selections.iter().map(|selection| &***selection), - )?; - } - } - SelectionValue::FragmentSpread(mut self_fragment_spread_selection) => { - if let Some(other_fragment_spread_selections) = - fragment_spreads.shift_remove(key) - { - self_fragment_spread_selection.merge_into( - other_fragment_spread_selections - .iter() - .map(|selection| &***selection), - )?; - } - } - SelectionValue::InlineFragment(mut self_inline_fragment_selection) => { - if let Some(other_inline_fragment_selections) = - inline_fragments.shift_remove(key) - { - self_inline_fragment_selection.merge_into( - other_inline_fragment_selections - .iter() - .map(|selection| &***selection), - )?; - } - } - } - } - - Ok(()) - } - pub(crate) fn expand_all_fragments(&self) -> Result { let mut 
expanded_selections = vec![]; SelectionSet::expand_selection_set(&mut expanded_selections, self)?; @@ -2684,74 +2548,6 @@ impl SelectionSet { self.selections.contains_key(key) } - /// Inserts a `Selection` into the inner map. Should a selection with the same key already - /// exist in the map, the existing selection and the given selection are merged, replacing the - /// existing selection while keeping the same insertion index. - /// - /// NOTE: This method assumes selection already points to the correct schema and parent type. - pub(crate) fn add_local_selection( - &mut self, - selection: &Selection, - ) -> Result<(), FederationError> { - debug_assert_eq!( - &self.schema, - selection.schema(), - "In order to add selection it needs to point to the same schema" - ); - self.merge_selections_into(std::iter::once(selection)) - } - - /// Inserts a `SelectionSet` into the inner map. Should any sub selection with the same key already - /// exist in the map, the existing selection and the given selection are merged, replacing the - /// existing selection while keeping the same insertion index. - /// - /// NOTE: This method assumes the target selection set already points to the same schema and type - /// position. Use `add_selection_set` instead if you need to rebase the selection set. - pub(crate) fn add_local_selection_set( - &mut self, - selection_set: &SelectionSet, - ) -> Result<(), FederationError> { - debug_assert_eq!( - self.schema, selection_set.schema, - "In order to add selection set it needs to point to the same schema." - ); - debug_assert_eq!( - self.type_position, selection_set.type_position, - "In order to add selection set it needs to point to the same type position" - ); - self.merge_into(std::iter::once(selection_set)) - } - - /// Rebase given `SelectionSet` on self and then inserts it into the inner map. Assumes that given - /// selection set does not reference ANY named fragments. If it does, Use `add_selection_set_with_fragments` - /// instead. 
- /// - /// Should any sub selection with the same key already exist in the map, the existing selection - /// and the given selection are merged, replacing the existing selection while keeping the same - /// insertion index. - pub(crate) fn add_selection_set( - &mut self, - selection_set: &SelectionSet, - ) -> Result<(), FederationError> { - self.add_selection_set_with_fragments(selection_set, &NamedFragments::default()) - } - - /// Rebase given `SelectionSet` on self with the specified fragments and then inserts it into the - /// inner map. - /// - /// Should any sub selection with the same key already exist in the map, the existing selection - /// and the given selection are merged, replacing the existing selection while keeping the same - /// insertion index. - pub(crate) fn add_selection_set_with_fragments( - &mut self, - selection_set: &SelectionSet, - named_fragments: &NamedFragments, - ) -> Result<(), FederationError> { - let rebased = - selection_set.rebase_on(&self.type_position, named_fragments, &self.schema)?; - self.add_local_selection_set(&rebased) - } - /// Adds a path, and optional some selections following that path, to this selection map. /// /// Today, it is possible here to add conflicting paths, such as: @@ -3461,60 +3257,6 @@ impl FieldSelection { } } -impl<'a> FieldSelectionValue<'a> { - /// Merges the given normalized field selections into this one (this method assumes the keys - /// already match). 
- pub(crate) fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let self_field = &self.get().field; - let mut selection_sets = vec![]; - for other in others { - let other_field = &other.field; - if other_field.schema != self_field.schema { - return Err(Internal { - message: "Cannot merge field selections from different schemas".to_owned(), - } - .into()); - } - if other_field.field_position != self_field.field_position { - return Err(Internal { - message: format!( - "Cannot merge field selection for field \"{}\" into a field selection for field \"{}\"", - other_field.field_position, - self_field.field_position, - ), - }.into()); - } - if self.get().selection_set.is_some() { - let Some(other_selection_set) = &other.selection_set else { - return Err(Internal { - message: format!( - "Field \"{}\" has composite type but not a selection set", - other_field.field_position, - ), - } - .into()); - }; - selection_sets.push(other_selection_set); - } else if other.selection_set.is_some() { - return Err(Internal { - message: format!( - "Field \"{}\" has non-composite type but also has a selection set", - other_field.field_position, - ), - } - .into()); - } - } - if let Some(self_selection_set) = self.get_selection_set_mut() { - self_selection_set.merge_into(selection_sets.into_iter())?; - } - Ok(()) - } -} - impl Field { pub(crate) fn has_defer(&self) -> bool { // @defer cannot be on field at the moment @@ -3536,32 +3278,6 @@ impl Field { } } -impl<'a> FragmentSpreadSelectionValue<'a> { - /// Merges the given normalized fragment spread selections into this one (this method assumes - /// the keys already match). 
- pub(crate) fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let self_fragment_spread = &self.get().spread; - for other in others { - let other_fragment_spread = &other.spread; - if other_fragment_spread.schema != self_fragment_spread.schema { - return Err(Internal { - message: "Cannot merge fragment spread from different schemas".to_owned(), - } - .into()); - } - // Nothing to do since the fragment spread is already part of the selection set. - // Fragment spreads are uniquely identified by fragment name and applied directives. - // Since there is already an entry for the same fragment spread, there is no point - // in attempting to merge its sub-selections, as the underlying entry should be - // exactly the same as the currently processed one. - } - Ok(()) - } -} - impl InlineFragmentSelection { pub(crate) fn new(inline_fragment: InlineFragment, selection_set: SelectionSet) -> Self { debug_assert_eq!( @@ -3712,58 +3428,6 @@ impl InlineFragmentSelection { } } -impl<'a> InlineFragmentSelectionValue<'a> { - /// Merges the given normalized inline fragment selections into this one (this method assumes - /// the keys already match). 
- pub(crate) fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let self_inline_fragment = &self.get().inline_fragment; - let mut selection_sets = vec![]; - for other in others { - let other_inline_fragment = &other.inline_fragment; - if other_inline_fragment.schema != self_inline_fragment.schema { - return Err(Internal { - message: "Cannot merge inline fragment from different schemas".to_owned(), - } - .into()); - } - if other_inline_fragment.parent_type_position - != self_inline_fragment.parent_type_position - { - return Err(Internal { - message: format!( - "Cannot merge inline fragment of parent type \"{}\" into an inline fragment of parent type \"{}\"", - other_inline_fragment.parent_type_position, - self_inline_fragment.parent_type_position, - ), - }.into()); - } - selection_sets.push(&other.selection_set); - } - self.get_selection_set_mut() - .merge_into(selection_sets.into_iter())?; - Ok(()) - } -} - -pub(crate) fn merge_selection_sets( - mut selection_sets: Vec, -) -> Result { - let Some((first, remainder)) = selection_sets.split_first_mut() else { - return Err(Internal { - message: "".to_owned(), - } - .into()); - }; - first.merge_into(remainder.iter())?; - - // Take ownership of the first element and discard the rest; - // we can unwrap because `split_first_mut()` guarantees at least one element will be yielded - Ok(selection_sets.into_iter().next().unwrap()) -} - /// This uses internal copy-on-write optimization to make `Clone` cheap. 
/// However a cloned `NamedFragments` still behaves like a deep copy: /// unlike in JS where we can have multiple references to a mutable map, From d940c3fb76df1b89409720c22dc9f0df5402f172 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Tue, 30 Jul 2024 10:14:58 +0200 Subject: [PATCH 019/108] Create the invalidation endpoint for entity caching (#5614) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Geoffroy Couprie --- apollo-router/src/configuration/schema.rs | 8 +- ...nfiguration__tests__schema_generation.snap | 47 +- ...nfiguration@entity_cache_preview.yaml.snap | 3 + .../testdata/metrics/entities.router.yaml | 3 + .../migrations/entity_cache_preview.yaml | 3 + apollo-router/src/notification.rs | 4 +- apollo-router/src/plugins/cache/entity.rs | 102 +++- .../src/plugins/cache/invalidation.rs | 176 ++++-- .../plugins/cache/invalidation_endpoint.rs | 569 ++++++++++++++++++ apollo-router/src/plugins/cache/mod.rs | 1 + apollo-router/src/plugins/cache/tests.rs | 6 +- apollo-router/src/plugins/subscription.rs | 2 +- apollo-router/src/router_factory.rs | 1 - .../uplink/testdata/restricted.router.yaml | 3 + apollo-router/tests/integration/redis.rs | 12 + .../configuration.yaml | 7 + .../invalidation-subgraph-type/skipped.json | 38 +- .../invalidation-subgraph/configuration.yaml | 3 + apollo-router/tests/samples_tests.rs | 52 +- 19 files changed, 944 insertions(+), 96 deletions(-) create mode 100644 apollo-router/src/plugins/cache/invalidation_endpoint.rs diff --git a/apollo-router/src/configuration/schema.rs b/apollo-router/src/configuration/schema.rs index a78015ab63..4d05b786ef 100644 --- a/apollo-router/src/configuration/schema.rs +++ b/apollo-router/src/configuration/schema.rs @@ -161,8 +161,12 @@ pub(crate) fn validate_yaml_configuration( let offset = start_marker .line() .saturating_sub(NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY); - - let lines = yaml_split_by_lines[offset..end_marker.line()] + let end = if end_marker.line() > 
yaml_split_by_lines.len() { + yaml_split_by_lines.len() + } else { + end_marker.line() + }; + let lines = yaml_split_by_lines[offset..end] .iter() .map(|line| format!(" {line}")) .join("\n"); diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 1f66e536ba..a702a933cd 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1606,6 +1606,11 @@ expression: "&schema" "description": "Enable or disable the entity caching feature", "type": "boolean" }, + "invalidation": { + "$ref": "#/definitions/InvalidationEndpointConfig", + "description": "#/definitions/InvalidationEndpointConfig", + "nullable": true + }, "metrics": { "$ref": "#/definitions/Metrics", "description": "#/definitions/Metrics" @@ -3518,6 +3523,24 @@ expression: "&schema" }, "type": "object" }, + "InvalidationEndpointConfig": { + "additionalProperties": false, + "properties": { + "listen": { + "$ref": "#/definitions/ListenAddr", + "description": "#/definitions/ListenAddr" + }, + "path": { + "description": "Specify on which path you want to listen for invalidation endpoint.", + "type": "string" + } + }, + "required": [ + "listen", + "path" + ], + "type": "object" + }, "JWTConf": { "additionalProperties": false, "properties": { @@ -5571,11 +5594,17 @@ expression: "&schema" "description": "Per subgraph configuration for entity caching", "properties": { "enabled": { + "default": true, "description": "activates caching for this subgraph, overrides the global configuration", - "nullable": true, "type": "boolean" }, + "invalidation": { + "$ref": "#/definitions/SubgraphInvalidationConfig", + "description": "#/definitions/SubgraphInvalidationConfig", + "nullable": true + }, 
"private_id": { + "default": null, "description": "Context key used to separate cache sections per user", "nullable": true, "type": "string" @@ -5779,6 +5808,22 @@ expression: "&schema" }, "type": "object" }, + "SubgraphInvalidationConfig": { + "additionalProperties": false, + "properties": { + "enabled": { + "default": false, + "description": "Enable the invalidation", + "type": "boolean" + }, + "shared_key": { + "default": "", + "description": "Shared key needed to request the invalidation endpoint", + "type": "string" + } + }, + "type": "object" + }, "SubgraphPassthroughMode": { "additionalProperties": false, "properties": { diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap index 08bc3e55b9..5544788d20 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap @@ -10,6 +10,9 @@ preview_entity_cache: timeout: 5ms ttl: 60s enabled: true + invalidation: + listen: "127.0.0.1:4000" + path: /invalidation subgraph: subgraphs: accounts: diff --git a/apollo-router/src/configuration/testdata/metrics/entities.router.yaml b/apollo-router/src/configuration/testdata/metrics/entities.router.yaml index 8c810effa7..0c886c2d64 100644 --- a/apollo-router/src/configuration/testdata/metrics/entities.router.yaml +++ b/apollo-router/src/configuration/testdata/metrics/entities.router.yaml @@ -4,6 +4,9 @@ preview_entity_cache: urls: [ "redis://localhost:6379" ] timeout: 5ms ttl: 60s + invalidation: + listen: 127.0.0.1:4000 + path: /invalidation subgraph: all: enabled: true diff --git 
a/apollo-router/src/configuration/testdata/migrations/entity_cache_preview.yaml b/apollo-router/src/configuration/testdata/migrations/entity_cache_preview.yaml index 2539a571ce..c210551098 100644 --- a/apollo-router/src/configuration/testdata/migrations/entity_cache_preview.yaml +++ b/apollo-router/src/configuration/testdata/migrations/entity_cache_preview.yaml @@ -4,6 +4,9 @@ preview_entity_cache: timeout: 5ms ttl: 60s enabled: true + invalidation: + listen: 127.0.0.1:4000 + path: /invalidation subgraphs: accounts: enabled: false diff --git a/apollo-router/src/notification.rs b/apollo-router/src/notification.rs index 77aff5db43..7cfba87e7a 100644 --- a/apollo-router/src/notification.rs +++ b/apollo-router/src/notification.rs @@ -807,6 +807,7 @@ where } #[allow(clippy::collapsible_if)] if topic_to_delete { + tracing::trace!("deleting subscription from unsubscribe"); if self.subscriptions.remove(&topic).is_some() { i64_up_down_counter!( "apollo_router_opened_subscriptions", @@ -880,6 +881,7 @@ where // Send error message to all killed connections for (_subscriber_id, subscription) in closed_subs { + tracing::trace!("deleting subscription from kill_dead_topics"); i64_up_down_counter!( "apollo_router_opened_subscriptions", "Number of opened subscriptions", @@ -907,7 +909,7 @@ where } fn force_delete(&mut self, topic: K) { - tracing::trace!("deleting subscription"); + tracing::trace!("deleting subscription from force_delete"); let sub = self.subscriptions.remove(&topic); if let Some(sub) = sub { i64_up_down_counter!( diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index 2375d4fde4..7332ca0d10 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -7,6 +7,7 @@ use std::time::Duration; use http::header; use http::header::CACHE_CONTROL; +use multimap::MultiMap; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; @@ -26,6 +27,9 @@ use tracing::Level; 
use super::cache_control::CacheControl; use super::invalidation::Invalidation; use super::invalidation::InvalidationOrigin; +use super::invalidation_endpoint::InvalidationEndpointConfig; +use super::invalidation_endpoint::InvalidationService; +use super::invalidation_endpoint::SubgraphInvalidationConfig; use super::metrics::CacheMetricContextKey; use super::metrics::CacheMetricsService; use crate::batching::BatchQuery; @@ -49,6 +53,8 @@ use crate::services::subgraph; use crate::services::supergraph; use crate::spec::TYPENAME; use crate::Context; +use crate::Endpoint; +use crate::ListenAddr; /// Change this key if you introduce a breaking change in entity caching algorithm to make sure it won't take the previous entries pub(crate) const ENTITY_CACHE_VERSION: &str = "1.0"; @@ -61,6 +67,7 @@ register_plugin!("apollo", "preview_entity_cache", EntityCache); #[derive(Clone)] pub(crate) struct EntityCache { storage: Option, + endpoint_config: Option>, subgraphs: Arc>, entity_type: Option, enabled: bool, @@ -78,25 +85,43 @@ pub(crate) struct Config { #[serde(default)] enabled: bool, + /// Configure invalidation per subgraph subgraph: SubgraphConfiguration, + /// Global invalidation configuration + invalidation: Option, + /// Entity caching evaluation metrics #[serde(default)] metrics: Metrics, } /// Per subgraph configuration for entity caching -#[derive(Clone, Debug, Default, JsonSchema, Deserialize, Serialize)] -#[serde(rename_all = "snake_case", deny_unknown_fields)] +#[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)] +#[serde(rename_all = "snake_case", deny_unknown_fields, default)] pub(crate) struct Subgraph { /// expiration for all keys for this subgraph, unless overriden by the `Cache-Control` header in subgraph responses pub(crate) ttl: Option, /// activates caching for this subgraph, overrides the global configuration - pub(crate) enabled: Option, + pub(crate) enabled: bool, /// Context key used to separate cache sections per user pub(crate) private_id: 
Option, + + /// Invalidation configuration + pub(crate) invalidation: Option, +} + +impl Default for Subgraph { + fn default() -> Self { + Self { + enabled: true, + ttl: Default::default(), + private_id: Default::default(), + invalidation: Default::default(), + } + } } /// Per subgraph configuration for entity caching @@ -179,12 +204,29 @@ impl Plugin for EntityCache { .into()); } + if init + .config + .subgraph + .all + .invalidation + .as_ref() + .map(|i| i.shared_key.is_empty()) + .unwrap_or_default() + { + return Err( + "you must set a default shared_key invalidation for all subgraphs" + .to_string() + .into(), + ); + } + let invalidation = Invalidation::new(storage.clone()).await?; Ok(Self { storage, entity_type, enabled: init.config.enabled, + endpoint_config: init.config.invalidation.clone().map(Arc::new), subgraphs: Arc::new(init.config.subgraph), metrics: init.config.metrics, private_queries: Arc::new(RwLock::new(HashSet::new())), @@ -240,13 +282,8 @@ impl Plugin for EntityCache { .clone() .map(|t| t.0) .or_else(|| storage.ttl()); - let subgraph_enabled = self.enabled - && self - .subgraphs - .get(name) - .enabled - // if the top level `enabled` is true but there is no other configuration, caching is enabled for this plugin - .unwrap_or(true); + let subgraph_enabled = + self.enabled && (self.subgraphs.all.enabled || self.subgraphs.get(name).enabled); let private_id = self.subgraphs.get(name).private_id.clone(); let name = name.to_string(); @@ -300,6 +337,40 @@ impl Plugin for EntityCache { .boxed() } } + + fn web_endpoints(&self) -> MultiMap { + let mut map = MultiMap::new(); + if self.enabled + && self + .subgraphs + .all + .invalidation + .as_ref() + .map(|i| i.enabled) + .unwrap_or_default() + { + match &self.endpoint_config { + Some(endpoint_config) => { + let endpoint = Endpoint::from_router_service( + endpoint_config.path.clone(), + InvalidationService::new(self.subgraphs.clone(), self.invalidation.clone()) + .boxed(), + ); + tracing::info!( + 
"Entity caching invalidation endpoint listening on: {}{}", + endpoint_config.listen, + endpoint_config.path + ); + map.insert(endpoint_config.listen.clone(), endpoint); + } + None => { + tracing::warn!("Cannot start entity caching invalidation endpoint because the listen address and endpoint is not configured"); + } + } + } + + map + } } impl EntityCache { @@ -311,6 +382,10 @@ impl EntityCache { where Self: Sized, { + use std::net::IpAddr; + use std::net::Ipv4Addr; + use std::net::SocketAddr; + let invalidation = Invalidation::new(Some(storage.clone())).await?; Ok(Self { storage: Some(storage), @@ -322,6 +397,13 @@ impl EntityCache { }), metrics: Metrics::default(), private_queries: Default::default(), + endpoint_config: Some(Arc::new(InvalidationEndpointConfig { + path: String::from("/invalidation"), + listen: ListenAddr::SocketAddr(SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + 4000, + )), + })), invalidation, }) } diff --git a/apollo-router/src/plugins/cache/invalidation.rs b/apollo-router/src/plugins/cache/invalidation.rs index 96c863e437..4e8e5d5204 100644 --- a/apollo-router/src/plugins/cache/invalidation.rs +++ b/apollo-router/src/plugins/cache/invalidation.rs @@ -1,11 +1,15 @@ use std::time::Instant; +use fred::error::RedisError; use fred::types::Scanner; use futures::SinkExt; use futures::StreamExt; +use itertools::Itertools; use serde::Deserialize; use serde::Serialize; use serde_json_bytes::Value; +use thiserror::Error; +use tokio::sync::broadcast; use tower::BoxError; use tracing::Instrument; @@ -19,15 +23,48 @@ use crate::Notify; #[derive(Clone)] pub(crate) struct Invalidation { - enabled: bool, - handle: Handle)>, + pub(super) enabled: bool, + #[allow(clippy::type_complexity)] + pub(super) handle: Handle< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + broadcast::Sender>, + ), + >, } +#[derive(Error, Debug, Clone)] +pub(crate) enum InvalidationError { + #[error("redis error")] + RedisError(#[from] RedisError), + #[error("several 
errors")] + Errors(#[from] InvalidationErrors), + #[cfg(test)] + #[error("custom error: {0}")] + Custom(String), +} + +#[derive(Debug, Clone)] +pub(crate) struct InvalidationErrors(Vec); + +impl std::fmt::Display for InvalidationErrors { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "invalidation errors: [{}]", + self.0.iter().map(|e| e.to_string()).join("; ") + ) + } +} + +impl std::error::Error for InvalidationErrors {} + #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub(crate) struct InvalidationTopic; -#[derive(Clone, Debug)] -#[allow(dead_code)] +#[derive(Clone, Debug, PartialEq)] pub(crate) enum InvalidationOrigin { Endpoint, Extensions, @@ -38,10 +75,12 @@ impl Invalidation { let mut notify = Notify::new(None, None, None); let (handle, _b) = notify.create_or_subscribe(InvalidationTopic, false).await?; let enabled = storage.is_some(); - if let Some(storage) = storage { + if let Some(storage) = storage.clone() { let h = handle.clone(); - tokio::task::spawn(async move { start(storage, h.into_stream()).await }); + tokio::task::spawn(async move { + start(storage, h.into_stream()).await; + }); } Ok(Self { enabled, handle }) } @@ -50,21 +89,46 @@ impl Invalidation { &mut self, origin: InvalidationOrigin, requests: Vec, - ) -> Result<(), BoxError> { + ) -> Result { if self.enabled { let mut sink = self.handle.clone().into_sink(); - sink.send((origin, requests)).await.map_err(|e| e.message)?; - } + let (response_tx, mut response_rx) = broadcast::channel(2); + sink.send((requests, origin, response_tx.clone())) + .await + .map_err(|e| format!("cannot send invalidation request: {}", e.message))?; + + let result = response_rx + .recv() + .await + .map_err(|err| { + format!( + "cannot receive response for invalidation request: {:?}", + err + ) + })? 
+ .map_err(|err| format!("received an invalidation error: {:?}", err))?; - Ok(()) + Ok(result) + } else { + Ok(0) + } } } +// TODO refactor +#[allow(clippy::type_complexity)] async fn start( storage: RedisCacheStorage, - mut handle: HandleStream)>, + mut handle: HandleStream< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + broadcast::Sender>, + ), + >, ) { - while let Some((origin, requests)) = handle.next().await { + while let Some((requests, origin, response_tx)) = handle.next().await { let origin = match origin { InvalidationOrigin::Endpoint => "endpoint", InvalidationOrigin::Extensions => "extensions", @@ -75,30 +139,16 @@ async fn start( 1u64, "origin" = origin ); - handle_request_batch(&storage, origin, requests) - .instrument(tracing::info_span!( - "cache.invalidation.batch", - "origin" = origin - )) - .await - } -} - -async fn handle_request_batch( - storage: &RedisCacheStorage, - origin: &'static str, - requests: Vec, -) { - for request in requests { - let start = Instant::now(); - handle_request(storage, origin, &request) - .instrument(tracing::info_span!("cache.invalidation.request")) - .await; - f64_histogram!( - "apollo.router.cache.invalidation.duration", - "Duration of the invalidation event execution.", - start.elapsed().as_secs_f64() - ); + if let Err(err) = response_tx.send( + handle_request_batch(&storage, origin, requests) + .instrument(tracing::info_span!( + "cache.invalidation.batch", + "origin" = origin + )) + .await, + ) { + ::tracing::error!("cannot send answer to invalidation request in the channel: {err}"); + } } } @@ -106,9 +156,9 @@ async fn handle_request( storage: &RedisCacheStorage, origin: &'static str, request: &InvalidationRequest, -) { +) -> Result { let key_prefix = request.key_prefix(); - let subgraph = request.subgraph(); + let subgraph = request.subgraph_name(); tracing::debug!( "got invalidation request: {request:?}, will scan for: {}", key_prefix @@ -117,6 +167,7 @@ async fn handle_request( // FIXME: configurable 
batch size let mut stream = storage.scan(key_prefix.clone(), Some(10)); let mut count = 0u64; + let mut error = None; while let Some(res) = stream.next().await { match res { @@ -126,6 +177,7 @@ async fn handle_request( error = %e, message = "error scanning for key", ); + error = Some(e); break; } Ok(scan_res) => { @@ -158,9 +210,46 @@ async fn handle_request( "Number of invalidated keys.", count ); + + match error { + Some(err) => Err(err.into()), + None => Ok(count), + } +} + +async fn handle_request_batch( + storage: &RedisCacheStorage, + origin: &'static str, + requests: Vec, +) -> Result { + let mut count = 0; + let mut errors = Vec::new(); + for request in requests { + let start = Instant::now(); + match handle_request(storage, origin, &request) + .instrument(tracing::info_span!("cache.invalidation.request")) + .await + { + Ok(c) => count += c, + Err(err) => { + errors.push(err); + } + } + f64_histogram!( + "apollo.router.cache.invalidation.duration", + "Duration of the invalidation event execution.", + start.elapsed().as_secs_f64() + ); + } + + if !errors.is_empty() { + Err(InvalidationErrors(errors).into()) + } else { + Ok(count) + } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(tag = "kind", rename_all = "lowercase")] pub(crate) enum InvalidationRequest { Subgraph { @@ -197,12 +286,11 @@ impl InvalidationRequest { } } - fn subgraph(&self) -> String { + pub(super) fn subgraph_name(&self) -> &String { match self { - InvalidationRequest::Subgraph { subgraph } => subgraph.clone(), - _ => { - todo!() - } + InvalidationRequest::Subgraph { subgraph } + | InvalidationRequest::Type { subgraph, .. } + | InvalidationRequest::Entity { subgraph, .. 
} => subgraph, } } } diff --git a/apollo-router/src/plugins/cache/invalidation_endpoint.rs b/apollo-router/src/plugins/cache/invalidation_endpoint.rs new file mode 100644 index 0000000000..424751c830 --- /dev/null +++ b/apollo-router/src/plugins/cache/invalidation_endpoint.rs @@ -0,0 +1,569 @@ +use std::sync::Arc; +use std::task::Poll; + +use bytes::Buf; +use futures::future::BoxFuture; +use http::header::AUTHORIZATION; +use http::Method; +use http::StatusCode; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use serde_json_bytes::json; +use tower::BoxError; +use tower::Service; +use tracing_futures::Instrument; + +use super::entity::Subgraph; +use super::invalidation::Invalidation; +use super::invalidation::InvalidationOrigin; +use crate::configuration::subgraph::SubgraphConfiguration; +use crate::plugins::cache::invalidation::InvalidationRequest; +use crate::services::router; +use crate::services::router::body::RouterBody; +use crate::ListenAddr; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema, Default)] +#[serde(rename_all = "snake_case", deny_unknown_fields, default)] +pub(crate) struct SubgraphInvalidationConfig { + /// Enable the invalidation + pub(crate) enabled: bool, + /// Shared key needed to request the invalidation endpoint + pub(crate) shared_key: String, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "snake_case", deny_unknown_fields)] +pub(crate) struct InvalidationEndpointConfig { + /// Specify on which path you want to listen for invalidation endpoint. + pub(crate) path: String, + /// Listen address on which the invalidation endpoint must listen. 
+ pub(crate) listen: ListenAddr, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub(crate) enum InvalidationType { + EntityType, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub(crate) struct InvalidationKey { + pub(crate) id: String, + pub(crate) field: String, +} + +#[derive(Clone)] +pub(crate) struct InvalidationService { + config: Arc>, + invalidation: Invalidation, +} + +impl InvalidationService { + pub(crate) fn new( + config: Arc>, + invalidation: Invalidation, + ) -> Self { + Self { + config, + invalidation, + } + } +} + +impl Service for InvalidationService { + type Response = router::Response; + type Error = BoxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { + Ok(()).into() + } + + fn call(&mut self, req: router::Request) -> Self::Future { + let mut invalidation = self.invalidation.clone(); + let config = self.config.clone(); + Box::pin( + async move { + let (parts, body) = req.router_request.into_parts(); + if !parts.headers.contains_key(AUTHORIZATION) { + return Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::UNAUTHORIZED) + .body("Missing authorization header".into()) + .map_err(BoxError::from)?, + context: req.context, + }); + } + match parts.method { + Method::POST => { + let body = Into::::into(body) + .to_bytes() + .await + .map_err(|e| format!("failed to get the request body: {e}")) + .and_then(|bytes| { + serde_json::from_reader::<_, Vec>( + bytes.reader(), + ) + .map_err(|err| { + format!( + "failed to deserialize the request body into JSON: {err}" + ) + }) + }); + let shared_key = parts + .headers + .get(AUTHORIZATION) + .ok_or("cannot find authorization header")? 
+ .to_str()?; + match body { + Ok(body) => { + let valid_shared_key = + body.iter().map(|b| b.subgraph_name()).any(|subgraph_name| { + valid_shared_key(&config, shared_key, subgraph_name) + }); + if !valid_shared_key { + return Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::UNAUTHORIZED) + .body("Invalid authorization header".into()) + .map_err(BoxError::from)?, + context: req.context, + }); + } + match invalidation + .invalidate(InvalidationOrigin::Endpoint, body) + .await + { + Ok(count) => Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::ACCEPTED) + .body( + serde_json::to_string(&json!({ + "count": count + }))? + .into(), + ) + .map_err(BoxError::from)?, + context: req.context, + }), + Err(err) => Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::BAD_REQUEST) + .body(err.to_string().into()) + .map_err(BoxError::from)?, + context: req.context, + }), + } + } + Err(err) => Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::BAD_REQUEST) + .body(err.into()) + .map_err(BoxError::from)?, + context: req.context, + }), + } + } + _ => Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::METHOD_NOT_ALLOWED) + .body("".into()) + .map_err(BoxError::from)?, + context: req.context, + }), + } + } + .instrument(tracing::info_span!("invalidation_endpoint")), + ) + } +} + +fn valid_shared_key( + config: &SubgraphConfiguration, + shared_key: &str, + subgraph_name: &str, +) -> bool { + config + .all + .invalidation + .as_ref() + .map(|i| i.shared_key == shared_key) + .unwrap_or_default() + || config + .subgraphs + .get(subgraph_name) + .and_then(|s| s.invalidation.as_ref()) + .map(|i| i.shared_key == shared_key) + .unwrap_or_default() +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use tokio::sync::broadcast::Sender; + use tokio_stream::StreamExt; + use tower::ServiceExt; + + use super::*; + use 
crate::plugins::cache::invalidation::InvalidationError; + use crate::plugins::cache::invalidation::InvalidationTopic; + use crate::Notify; + + #[tokio::test] + async fn test_invalidation_service_bad_shared_key() { + #[allow(clippy::type_complexity)] + let mut notify: Notify< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + Sender>, + ), + > = Notify::new(None, None, None); + let (handle, _b) = notify + .create_or_subscribe(InvalidationTopic, false) + .await + .unwrap(); + let invalidation = Invalidation { + enabled: true, + handle, + }; + let config = Arc::new(SubgraphConfiguration { + all: Subgraph { + ttl: None, + enabled: true, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test"), + }), + }, + subgraphs: HashMap::new(), + }); + let service = InvalidationService::new(config, invalidation); + let req = router::Request::fake_builder() + .method(http::Method::POST) + .header(AUTHORIZATION, "testttt") + .body( + serde_json::to_vec(&[ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ]) + .unwrap(), + ) + .build() + .unwrap(); + let res = service.oneshot(req).await.unwrap(); + assert_eq!(res.response.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn test_invalidation_service_good_sub_shared_key() { + #[allow(clippy::type_complexity)] + let mut notify: Notify< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + Sender>, + ), + > = Notify::new(None, None, None); + let (handle, _b) = notify + .create_or_subscribe(InvalidationTopic, false) + .await + .unwrap(); + let h = handle.clone(); + + tokio::task::spawn(async move { + let mut handle = h.into_stream(); + let mut called = false; + while let Some((requests, origin, response_tx)) = handle.next().await { + called = true; + if requests + != [ + InvalidationRequest::Subgraph { + subgraph: 
String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ] + { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation requests : {requests:?}" + )))) + .unwrap(); + return; + } + if origin != InvalidationOrigin::Endpoint { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation origin : {origin:?}" + )))) + .unwrap(); + return; + } + response_tx.send(Ok(0)).unwrap(); + } + assert!(called); + }); + + let invalidation = Invalidation { + enabled: true, + handle: handle.clone(), + }; + let config = Arc::new(SubgraphConfiguration { + all: Subgraph { + ttl: None, + enabled: true, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test"), + }), + }, + subgraphs: [( + String::from("test"), + Subgraph { + ttl: None, + enabled: true, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test_test"), + }), + }, + )] + .into_iter() + .collect(), + }); + let service = InvalidationService::new(config, invalidation); + let req = router::Request::fake_builder() + .method(http::Method::POST) + .header(AUTHORIZATION, "test_test") + .body( + serde_json::to_vec(&[ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ]) + .unwrap(), + ) + .build() + .unwrap(); + let res = service.oneshot(req).await.unwrap(); + assert_eq!(res.response.status(), StatusCode::ACCEPTED); + let h = handle.clone(); + + tokio::task::spawn(async move { + let mut handle = h.into_stream(); + let mut called = false; + while let Some((requests, origin, response_tx)) = handle.next().await { + called = true; + if requests + != [ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + 
InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ] + { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation requests : {requests:?}" + )))) + .unwrap(); + return; + } + if origin != InvalidationOrigin::Endpoint { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation origin : {origin:?}" + )))) + .unwrap(); + return; + } + response_tx.send(Ok(0)).unwrap(); + } + assert!(called); + }); + } + + #[tokio::test] + async fn test_invalidation_service_bad_shared_key_subgraph() { + #[allow(clippy::type_complexity)] + let mut notify: Notify< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + Sender>, + ), + > = Notify::new(None, None, None); + let (handle, _b) = notify + .create_or_subscribe(InvalidationTopic, false) + .await + .unwrap(); + let invalidation = Invalidation { + enabled: true, + handle, + }; + let config = Arc::new(SubgraphConfiguration { + all: Subgraph { + ttl: None, + enabled: true, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test"), + }), + }, + subgraphs: [( + String::from("test"), + Subgraph { + ttl: None, + enabled: true, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test_test"), + }), + }, + )] + .into_iter() + .collect(), + }); + // Trying to invalidation with shared_key on subgraph test for a subgraph foo + let service = InvalidationService::new(config, invalidation); + let req = router::Request::fake_builder() + .method(http::Method::POST) + .header(AUTHORIZATION, "test_test") + .body( + serde_json::to_vec(&[InvalidationRequest::Subgraph { + subgraph: String::from("foo"), + }]) + .unwrap(), + ) + .build() + .unwrap(); + let res = service.oneshot(req).await.unwrap(); + assert_eq!(res.response.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn 
test_invalidation_service() { + #[allow(clippy::type_complexity)] + let mut notify: Notify< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + Sender>, + ), + > = Notify::new(None, None, None); + let (handle, _b) = notify + .create_or_subscribe(InvalidationTopic, false) + .await + .unwrap(); + let h = handle.clone(); + + tokio::task::spawn(async move { + let mut handle = h.into_stream(); + let mut called = false; + while let Some((requests, origin, response_tx)) = handle.next().await { + called = true; + if requests + != [ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ] + { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation requests : {requests:?}" + )))) + .unwrap(); + return; + } + if origin != InvalidationOrigin::Endpoint { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation origin : {origin:?}" + )))) + .unwrap(); + return; + } + response_tx.send(Ok(2)).unwrap(); + } + assert!(called); + }); + + let invalidation = Invalidation { + enabled: true, + handle, + }; + let config = Arc::new(SubgraphConfiguration { + all: Subgraph { + ttl: None, + enabled: true, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test"), + }), + }, + subgraphs: HashMap::new(), + }); + let service = InvalidationService::new(config, invalidation); + let req = router::Request::fake_builder() + .method(http::Method::POST) + .header(AUTHORIZATION, "test") + .body( + serde_json::to_vec(&[ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ]) + .unwrap(), + ) + .build() + .unwrap(); + let res = service.oneshot(req).await.unwrap(); + assert_eq!(res.response.status(), StatusCode::ACCEPTED); 
+ assert_eq!( + serde_json::from_slice::( + &hyper::body::to_bytes(res.response.into_body()) + .await + .unwrap() + ) + .unwrap(), + serde_json::json!({"count": 2}) + ); + } +} diff --git a/apollo-router/src/plugins/cache/mod.rs b/apollo-router/src/plugins/cache/mod.rs index dded2f9586..c45265a3d3 100644 --- a/apollo-router/src/plugins/cache/mod.rs +++ b/apollo-router/src/plugins/cache/mod.rs @@ -1,6 +1,7 @@ pub(crate) mod cache_control; pub(crate) mod entity; pub(crate) mod invalidation; +pub(crate) mod invalidation_endpoint; pub(crate) mod metrics; #[cfg(test)] pub(crate) mod tests; diff --git a/apollo-router/src/plugins/cache/tests.rs b/apollo-router/src/plugins/cache/tests.rs index 3d0bb21169..8af136c0c9 100644 --- a/apollo-router/src/plugins/cache/tests.rs +++ b/apollo-router/src/plugins/cache/tests.rs @@ -399,16 +399,18 @@ async fn private() { "user".to_string(), Subgraph { private_id: Some("sub".to_string()), - enabled: Some(true), + enabled: true, ttl: None, + ..Default::default() }, ), ( "orga".to_string(), Subgraph { private_id: Some("sub".to_string()), - enabled: Some(true), + enabled: true, ttl: None, + ..Default::default() }, ), ] diff --git a/apollo-router/src/plugins/subscription.rs b/apollo-router/src/plugins/subscription.rs index 4ca4d56201..50d5e78ead 100644 --- a/apollo-router/src/plugins/subscription.rs +++ b/apollo-router/src/plugins/subscription.rs @@ -229,7 +229,7 @@ fn default_path() -> String { String::from("/callback") } -fn default_listen_addr() -> ListenAddr { +pub(crate) fn default_listen_addr() -> ListenAddr { ListenAddr::SocketAddr("127.0.0.1:4000".parse().expect("valid ListenAddr")) } diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index ca1dd4cb7b..e2d20593a9 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -602,7 +602,6 @@ pub(crate) async fn create_plugins( ($name: literal, $opt_plugin_config: expr) => {{ let name = concat!("apollo.", $name); let 
span = tracing::info_span!(concat!("plugin: ", "apollo.", $name)); - async { let factory = apollo_plugin_factories .remove(name) diff --git a/apollo-router/src/uplink/testdata/restricted.router.yaml b/apollo-router/src/uplink/testdata/restricted.router.yaml index 14aa7bd994..278e6134c8 100644 --- a/apollo-router/src/uplink/testdata/restricted.router.yaml +++ b/apollo-router/src/uplink/testdata/restricted.router.yaml @@ -54,6 +54,9 @@ plugins: preview_entity_cache: enabled: true + invalidation: + listen: 127.0.0.1:4000 + path: /invalidation redis: urls: - https://example.com diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index b7bfe2ea52..e0cfc0037e 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -364,6 +364,10 @@ async fn entity_cache() -> Result<(), BoxError> { "urls": ["redis://127.0.0.1:6379"], "ttl": "2s" }, + "invalidation": { + "listen": "127.0.0.1:4000", + "path": "/invalidation" + }, "subgraph": { "all": { "enabled": false @@ -474,6 +478,10 @@ async fn entity_cache() -> Result<(), BoxError> { "urls": ["redis://127.0.0.1:6379"], "ttl": "2s" }, + "invalidation": { + "listen": "127.0.0.1:4000", + "path": "/invalidation" + }, "subgraph": { "all": { "enabled": false, @@ -677,6 +685,10 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { "urls": ["redis://127.0.0.1:6379"], "ttl": "2s" }, + "invalidation": { + "listen": "127.0.0.1:4000", + "path": "/invalidation" + }, "subgraph": { "all": { "enabled": false, diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml index b297fee443..55728b841b 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml +++ 
b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml @@ -8,9 +8,16 @@ preview_entity_cache: redis: urls: ["redis://localhost:6379",] + invalidation: + # FIXME: right now we cannot configure it to use the same port used for the GraphQL endpoint if it is chosen at random + listen: 127.0.0.1:12345 + path: /invalidation-sample-subgraph-type subgraph: all: enabled: true + invalidation: + enabled: true + shared_key: "1234" subgraphs: reviews: ttl: 120s diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json index f6996f21b8..89e90f1be9 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json @@ -43,28 +43,7 @@ "type": "ReloadSubgraphs", "subgraphs": { "accounts": { - "requests": [ - { - "request": { - "body": {"query":"mutation{updateMyAccount{name}}"} - }, - "response": { - "headers": { - "Content-Type": "application/json" - }, - "body": { - "data": { "updateMyAccount": { "name": "invalidation-subgraph-type2" } }, - "extensions": { - "invalidation": [{ - "kind": "type", - "subgraph": "accounts", - "type": "Query" - }] - } - } - } - } - ] + "requests": [] } } }, @@ -83,15 +62,14 @@ } }, { - "type": "Request", + "type": "EndpointRequest", + "url": "http://127.0.0.1:12345/invalidation-sample-subgraph-type", "request": { - "query": "mutation { updateMyAccount { name } }" - }, - "expected_response": { - "data":{ - "updateMyAccount":{ - "name":"invalidation-subgraph-type2" - } + "method": "POST", + "body": { + "kind": "type", + "subgraph": "accounts", + "type": "Query" } } }, diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml 
b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml index b297fee443..a54c33f25d 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml @@ -5,6 +5,9 @@ include_subgraph_errors: preview_entity_cache: enabled: true + invalidation: + listen: 127.0.0.1:4000 + path: /invalidation redis: urls: ["redis://localhost:6379",] diff --git a/apollo-router/tests/samples_tests.rs b/apollo-router/tests/samples_tests.rs index 4507089a66..b6f2f902f1 100644 --- a/apollo-router/tests/samples_tests.rs +++ b/apollo-router/tests/samples_tests.rs @@ -176,6 +176,9 @@ impl TestExecution { ) .await } + Action::EndpointRequest { url, request } => { + self.endpoint_request(url, request.clone(), out).await + } Action::Stop => self.stop(out).await, } } @@ -479,6 +482,43 @@ impl TestExecution { Ok(()) } + + async fn endpoint_request( + &mut self, + url: &url::Url, + request: HttpRequest, + out: &mut String, + ) -> Result<(), Failed> { + let client = reqwest::Client::new(); + + let mut builder = client.request( + request + .method + .as_deref() + .unwrap_or("POST") + .try_into() + .unwrap(), + url.clone(), + ); + for (name, value) in request.headers { + builder = builder.header(name, value); + } + + let request = builder.json(&request.body).build().unwrap(); + let response = client.execute(request).await.map_err(|e| { + writeln!( + out, + "could not send request to Router endpoint at {url}: {e}" + ) + .unwrap(); + let f: Failed = out.clone().into(); + f + })?; + + writeln!(out, "Endpoint returned: {response:?}").unwrap(); + + Ok(()) + } } fn open_file(path: &Path, out: &mut String) -> Result { @@ -537,6 +577,10 @@ enum Action { query_path: Option, expected_response: Value, }, + EndpointRequest { + url: url::Url, + request: HttpRequest, + }, Stop, } @@ -547,12 +591,12 @@ struct Subgraph { #[derive(Clone, 
Debug, Deserialize)] struct SubgraphRequestMock { - request: SubgraphRequest, - response: SubgraphResponse, + request: HttpRequest, + response: HttpResponse, } #[derive(Clone, Debug, Deserialize)] -struct SubgraphRequest { +struct HttpRequest { method: Option, path: Option, #[serde(default)] @@ -561,7 +605,7 @@ struct SubgraphRequest { } #[derive(Clone, Debug, Deserialize)] -struct SubgraphResponse { +struct HttpResponse { status: Option, #[serde(default)] headers: HashMap, From b96224cae9c274829eff3c214b1e963c4292b167 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Tue, 30 Jul 2024 16:51:54 +0200 Subject: [PATCH 020/108] Parse supergraph schema early to avoid re-parsing (#5736) As discussed in https://github.com/apollographql/router/pull/5623#issuecomment-2217896741 This may may startup and reload slightly faster for large schemas, but more importantly it makes `spec::Schema` available in more places which will help upcoming changes for Rust replatforming. --- apollo-router/src/axum_factory/tests.rs | 14 +++-- apollo-router/src/orbiter/mod.rs | 2 +- .../cost_calculator/static_cost.rs | 4 +- .../src/plugins/include_subgraph_errors.rs | 4 +- .../src/plugins/record_replay/record.rs | 2 +- apollo-router/src/plugins/test.rs | 16 +++--- .../src/plugins/traffic_shaping/mod.rs | 5 +- .../src/query_planner/bridge_query_planner.rs | 47 ++++++++--------- .../bridge_query_planner_pool.rs | 19 ++----- apollo-router/src/router_factory.rs | 32 +++++------- apollo-router/src/spec/schema.rs | 41 +++++++-------- apollo-router/src/state_machine.rs | 20 +++---- apollo-router/src/test_harness.rs | 4 +- .../src/uplink/license_enforcement.rs | 52 +++++++++---------- ...ent_directive_arg_version_in_range.graphql | 45 ++++++++++++++++ ...directive_arg_version_out_of_range.graphql | 35 +++++++++++++ ..._enforcement_spec_version_in_range.graphql | 21 ++++++++ ...orcement_spec_version_out_of_range.graphql | 21 ++++++++ .../src/uplink/testdata/unix_socket.graphql | 5 ++ 19 files changed, 247 
insertions(+), 142 deletions(-) diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs index dde3852032..5cd6b1a549 100644 --- a/apollo-router/src/axum_factory/tests.rs +++ b/apollo-router/src/axum_factory/tests.rs @@ -85,6 +85,7 @@ use crate::services::RouterResponse; use crate::services::SupergraphResponse; use crate::services::MULTIPART_DEFER_ACCEPT; use crate::services::MULTIPART_DEFER_CONTENT_TYPE; +use crate::spec::Schema; use crate::test_harness::http_client; use crate::test_harness::http_client::MaybeMultipart; use crate::uplink::license_enforcement::LicenseState; @@ -2309,14 +2310,11 @@ async fn test_supergraph_timeout() { let conf: Arc = Arc::new(serde_json::from_value(config).unwrap()); let schema = include_str!("..//testdata/minimal_supergraph.graphql"); - let planner = BridgeQueryPlannerPool::new( - schema.to_string(), - conf.clone(), - NonZeroUsize::new(1).unwrap(), - ) - .await - .unwrap(); - let schema = planner.schema(); + let schema = Arc::new(Schema::parse(schema, &conf).unwrap()); + let planner = + BridgeQueryPlannerPool::new(schema.clone(), conf.clone(), NonZeroUsize::new(1).unwrap()) + .await + .unwrap(); // we do the entire supergraph rebuilding instead of using `from_supergraph_mock_callback_and_configuration` // because we need the plugins to apply on the supergraph diff --git a/apollo-router/src/orbiter/mod.rs b/apollo-router/src/orbiter/mod.rs index ebd3383752..a326e15d48 100644 --- a/apollo-router/src/orbiter/mod.rs +++ b/apollo-router/src/orbiter/mod.rs @@ -97,7 +97,7 @@ impl RouterSuperServiceFactory for OrbiterRouterSuperServiceFactory { &'a mut self, is_telemetry_disabled: bool, configuration: Arc, - schema: String, + schema: Arc, previous_router: Option<&'a Self::RouterFactory>, extra_plugins: Option)>>, ) -> Result { diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs index 
f84a4fcd0a..1156c6df7d 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs @@ -462,9 +462,9 @@ mod tests { async fn planned_cost(schema_str: &str, query_str: &str) -> f64 { let config: Arc = Arc::new(Default::default()); - let (_schema, query) = parse_schema_and_operation(schema_str, query_str, &config); + let (schema, query) = parse_schema_and_operation(schema_str, query_str, &config); - let mut planner = BridgeQueryPlanner::new(schema_str.to_string(), config.clone(), None) + let mut planner = BridgeQueryPlanner::new(schema.into(), config.clone(), None) .await .unwrap(); diff --git a/apollo-router/src/plugins/include_subgraph_errors.rs b/apollo-router/src/plugins/include_subgraph_errors.rs index 66ffa53917..56aae8045c 100644 --- a/apollo-router/src/plugins/include_subgraph_errors.rs +++ b/apollo-router/src/plugins/include_subgraph_errors.rs @@ -100,6 +100,7 @@ mod test { use crate::services::HasSchema; use crate::services::PluggableSupergraphServiceBuilder; use crate::services::SupergraphRequest; + use crate::spec::Schema; use crate::Configuration; static UNREDACTED_PRODUCT_RESPONSE: Lazy = Lazy::new(|| { @@ -191,8 +192,9 @@ mod test { let schema = include_str!("../../../apollo-router-benchmarks/benches/fixtures/supergraph.graphql"); + let schema = Schema::parse(schema, &Default::default()).unwrap(); let planner = BridgeQueryPlannerPool::new( - schema.to_string(), + schema.into(), Default::default(), NonZeroUsize::new(1).unwrap(), ) diff --git a/apollo-router/src/plugins/record_replay/record.rs b/apollo-router/src/plugins/record_replay/record.rs index e821b016b2..f9dc97b52a 100644 --- a/apollo-router/src/plugins/record_replay/record.rs +++ b/apollo-router/src/plugins/record_replay/record.rs @@ -67,7 +67,7 @@ impl Plugin for Record { enabled: init.config.enabled, supergraph_sdl: init.supergraph_sdl.clone(), storage_path: storage_path.clone().into(), - 
schema: Arc::new(Schema::parse(&init.supergraph_sdl, &Default::default())?), + schema: Arc::new(Schema::parse_arc(init.supergraph_sdl, &Default::default())?), }; if init.config.enabled { diff --git a/apollo-router/src/plugins/test.rs b/apollo-router/src/plugins/test.rs index c31ae9acc7..523e07f253 100644 --- a/apollo-router/src/plugins/test.rs +++ b/apollo-router/src/plugins/test.rs @@ -18,6 +18,7 @@ use crate::services::http; use crate::services::router; use crate::services::subgraph; use crate::services::supergraph; +use crate::spec::Schema; use crate::Configuration; use crate::Notify; @@ -88,17 +89,16 @@ impl PluginTestHarness { .unwrap_or(Value::Object(Default::default())); let (supergraph_sdl, parsed_schema, subgraph_schemas) = if let Some(schema) = schema { - let planner = BridgeQueryPlanner::new(schema.to_string(), Arc::new(config), None) + let schema = Schema::parse(schema, &config).unwrap(); + let sdl = schema.raw_sdl.clone(); + let supergraph = schema.supergraph_schema().clone(); + let planner = BridgeQueryPlanner::new(schema.into(), Arc::new(config), None) .await .unwrap(); - ( - schema.to_string(), - planner.schema().supergraph_schema().clone(), - planner.subgraph_schemas(), - ) + (sdl, supergraph, planner.subgraph_schemas()) } else { ( - "".to_string(), + "".to_string().into(), Valid::assume_valid(apollo_compiler::Schema::new()), Default::default(), ) @@ -106,7 +106,7 @@ impl PluginTestHarness { let plugin_init = PluginInit::builder() .config(config_for_plugin.clone()) - .supergraph_sdl(Arc::new(supergraph_sdl)) + .supergraph_sdl(supergraph_sdl) .supergraph_schema(Arc::new(parsed_schema)) .subgraph_schemas(subgraph_schemas) .notify(Notify::default()) diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index a3ddda0e6d..e4e2eabfa1 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -484,6 +484,7 @@ mod test { use 
crate::services::PluggableSupergraphServiceBuilder; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; + use crate::spec::Schema; use crate::Configuration; static EXPECTED_RESPONSE: Lazy = Lazy::new(|| { @@ -568,14 +569,14 @@ mod test { .unwrap(); let config = Arc::new(config); + let schema = Arc::new(Schema::parse(schema, &config).unwrap()); let planner = BridgeQueryPlannerPool::new( - schema.to_string(), + schema.clone(), config.clone(), NonZeroUsize::new(1).unwrap(), ) .await .unwrap(); - let schema = planner.schema(); let subgraph_schemas = planner.subgraph_schemas(); let mut builder = diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index eb5990e43e..f2348ef11b 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -323,11 +323,10 @@ impl PlannerMode { impl BridgeQueryPlanner { pub(crate) async fn new( - schema: String, + schema: Arc, configuration: Arc, old_planner: Option>>, ) -> Result { - let schema = Schema::parse(&schema, &configuration)?; let planner = PlannerMode::new(&schema, &configuration, old_planner).await?; let subgraph_schemas = Arc::new(planner.subgraphs().await?); @@ -353,7 +352,7 @@ impl BridgeQueryPlanner { Ok(Self { planner, - schema: Arc::new(schema), + schema, subgraph_schemas, introspection, enable_authorization_directives, @@ -369,6 +368,7 @@ impl BridgeQueryPlanner { .clone() } + #[cfg(test)] pub(crate) fn schema(&self) -> Arc { self.schema.clone() } @@ -988,13 +988,12 @@ mod tests { #[test(tokio::test)] async fn federation_versions() { async { - let _planner = BridgeQueryPlanner::new( - include_str!("../testdata/minimal_supergraph.graphql").into(), - Default::default(), - None, - ) - .await - .unwrap(); + let sdl = include_str!("../testdata/minimal_supergraph.graphql"); + let config = Arc::default(); + let schema = Schema::parse(sdl, 
&config).unwrap(); + let _planner = BridgeQueryPlanner::new(schema.into(), config, None) + .await + .unwrap(); assert_gauge!( "apollo.router.supergraph.federation", @@ -1006,13 +1005,12 @@ mod tests { .await; async { - let _planner = BridgeQueryPlanner::new( - include_str!("../testdata/minimal_fed2_supergraph.graphql").into(), - Default::default(), - None, - ) - .await - .unwrap(); + let sdl = include_str!("../testdata/minimal_fed2_supergraph.graphql"); + let config = Arc::default(); + let schema = Schema::parse(sdl, &config).unwrap(); + let _planner = BridgeQueryPlanner::new(schema.into(), config, None) + .await + .unwrap(); assert_gauge!( "apollo.router.supergraph.federation", @@ -1026,10 +1024,10 @@ mod tests { #[test(tokio::test)] async fn empty_query_plan_should_be_a_planner_error() { - let schema = Schema::parse(EXAMPLE_SCHEMA, &Default::default()).unwrap(); + let schema = Arc::new(Schema::parse(EXAMPLE_SCHEMA, &Default::default()).unwrap()); let query = include_str!("testdata/unknown_introspection_query.graphql"); - let planner = BridgeQueryPlanner::new(EXAMPLE_SCHEMA.to_string(), Default::default(), None) + let planner = BridgeQueryPlanner::new(schema.clone(), Default::default(), None) .await .unwrap(); @@ -1128,10 +1126,10 @@ mod tests { configuration.supergraph.introspection = true; let configuration = Arc::new(configuration); - let planner = - BridgeQueryPlanner::new(EXAMPLE_SCHEMA.to_string(), configuration.clone(), None) - .await - .unwrap(); + let schema = Schema::parse(EXAMPLE_SCHEMA, &configuration).unwrap(); + let planner = BridgeQueryPlanner::new(schema.into(), configuration.clone(), None) + .await + .unwrap(); macro_rules! 
s { ($query: expr) => { @@ -1436,7 +1434,8 @@ mod tests { configuration.supergraph.introspection = true; let configuration = Arc::new(configuration); - let planner = BridgeQueryPlanner::new(schema.to_string(), configuration.clone(), None) + let schema = Schema::parse(schema, &configuration).unwrap(); + let planner = BridgeQueryPlanner::new(schema.into(), configuration.clone(), None) .await .unwrap(); diff --git a/apollo-router/src/query_planner/bridge_query_planner_pool.rs b/apollo-router/src/query_planner/bridge_query_planner_pool.rs index 5da661c4d2..19e80b539e 100644 --- a/apollo-router/src/query_planner/bridge_query_planner_pool.rs +++ b/apollo-router/src/query_planner/bridge_query_planner_pool.rs @@ -40,16 +40,16 @@ pub(crate) struct BridgeQueryPlannerPool { impl BridgeQueryPlannerPool { pub(crate) async fn new( - sdl: String, + schema: Arc, configuration: Arc, size: NonZeroUsize, ) -> Result { - Self::new_from_planners(Default::default(), sdl, configuration, size).await + Self::new_from_planners(Default::default(), schema, configuration, size).await } pub(crate) async fn new_from_planners( old_planners: Vec>>, - schema: String, + schema: Arc, configuration: Arc, size: NonZeroUsize, ) -> Result { @@ -63,12 +63,12 @@ impl BridgeQueryPlannerPool { let mut old_planners_iterator = old_planners.into_iter(); (0..size.into()).for_each(|_| { - let sdl = schema.clone(); + let schema = schema.clone(); let configuration = configuration.clone(); let old_planner = old_planners_iterator.next(); join_set.spawn(async move { - BridgeQueryPlanner::new(sdl, configuration, old_planner).await + BridgeQueryPlanner::new(schema, configuration, old_planner).await }); }); @@ -80,15 +80,6 @@ impl BridgeQueryPlannerPool { bridge_query_planners.push(bridge_query_planner); } - let schema = bridge_query_planners - .first() - .ok_or_else(|| { - ServiceBuildError::QueryPlannerError(QueryPlannerError::PoolProcessing( - "There should be at least 1 Query Planner service in pool".to_string(), - 
)) - })? - .schema(); - let subgraph_schemas = bridge_query_planners .first() .ok_or_else(|| { diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index e2d20593a9..5779215a5b 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -141,7 +141,7 @@ pub(crate) trait RouterSuperServiceFactory: Send + Sync + 'static { &'a mut self, is_telemetry_disabled: bool, configuration: Arc, - schema: String, + schema: Arc, previous_router: Option<&'a Self::RouterFactory>, extra_plugins: Option)>>, ) -> Result; @@ -159,7 +159,7 @@ impl RouterSuperServiceFactory for YamlRouterFactory { &'a mut self, _is_telemetry_disabled: bool, configuration: Arc, - schema: String, + schema: Arc, previous_router: Option<&'a Self::RouterFactory>, extra_plugins: Option)>>, ) -> Result { @@ -179,17 +179,13 @@ impl RouterSuperServiceFactory for YamlRouterFactory { .get("telemetry") .cloned(); if let Some(plugin_config) = &mut telemetry_config { - inject_schema_id(Some(&Schema::schema_id(&schema)), plugin_config); + inject_schema_id(Some(&schema.schema_id), plugin_config); match factory .create_instance( PluginInit::builder() .config(plugin_config.clone()) - .supergraph_sdl(Arc::new(schema.clone())) - .supergraph_schema(Arc::new( - apollo_compiler::validation::Valid::assume_valid( - apollo_compiler::Schema::new(), - ), - )) + .supergraph_sdl(schema.raw_sdl.clone()) + .supergraph_schema(Arc::new(schema.supergraph_schema().clone())) .notify(configuration.notify.clone()) .build(), ) @@ -227,7 +223,7 @@ impl YamlRouterFactory { async fn inner_create<'a>( &'a mut self, configuration: Arc, - schema: String, + schema: Arc, previous_router: Option<&'a RouterCreator>, initial_telemetry_plugin: Option>, extra_plugins: Option)>>, @@ -302,7 +298,7 @@ impl YamlRouterFactory { pub(crate) async fn inner_create_supergraph<'a>( &'a mut self, configuration: Arc, - schema: String, + schema: Arc, previous_supergraph: Option<&'a SupergraphCreator>, 
initial_telemetry_plugin: Option>, extra_plugins: Option)>>, @@ -339,7 +335,7 @@ impl YamlRouterFactory { }; let schema_changed = previous_supergraph - .map(|supergraph_creator| supergraph_creator.schema().raw_sdl.as_ref() == &schema) + .map(|supergraph_creator| supergraph_creator.schema().raw_sdl == schema.raw_sdl) .unwrap_or_default(); let config_changed = previous_supergraph @@ -519,16 +515,11 @@ fn load_certs(certificates: &str) -> io::Result> { /// not meant to be used directly pub async fn create_test_service_factory_from_yaml(schema: &str, configuration: &str) { let config: Configuration = serde_yaml::from_str(configuration).unwrap(); + let schema = Arc::new(Schema::parse(schema, &config).unwrap()); let is_telemetry_disabled = false; let service = YamlRouterFactory - .create( - is_telemetry_disabled, - Arc::new(config), - schema.to_string(), - None, - None, - ) + .create(is_telemetry_disabled, Arc::new(config), schema, None, None) .await; assert_eq!( service.map(|_| ()).unwrap_err().to_string().as_str(), @@ -973,13 +964,14 @@ mod test { async fn create_service(config: Configuration) -> Result<(), BoxError> { let schema = include_str!("testdata/supergraph.graphql"); + let schema = Schema::parse(schema, &config)?; let is_telemetry_disabled = false; let service = YamlRouterFactory .create( is_telemetry_disabled, Arc::new(config), - schema.to_string(), + Arc::new(schema), None, None, ) diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs index 07f13f746a..05546910ca 100644 --- a/apollo-router/src/spec/schema.rs +++ b/apollo-router/src/spec/schema.rs @@ -5,7 +5,6 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Instant; -use apollo_compiler::ast; use apollo_compiler::schema::Implementers; use apollo_compiler::validation::Valid; use apollo_compiler::Name; @@ -38,32 +37,30 @@ pub(crate) struct Schema { pub(crate) struct ApiSchema(pub(crate) ValidFederationSchema); impl Schema { - pub(crate) fn parse_ast(sdl: &str) -> Result { 
+ pub(crate) fn parse(raw_sdl: &str, config: &Configuration) -> Result { + Self::parse_arc(raw_sdl.to_owned().into(), config) + } + + pub(crate) fn parse_arc( + raw_sdl: Arc, + config: &Configuration, + ) -> Result { + let start = Instant::now(); let mut parser = apollo_compiler::parser::Parser::new(); - let result = parser.parse_ast(sdl, "schema.graphql"); + let result = parser.parse_ast(raw_sdl.as_ref(), "schema.graphql"); // Trace log recursion limit data let recursion_limit = parser.recursion_reached(); tracing::trace!(?recursion_limit, "recursion limit data"); - result.map_err(|invalid| { - SchemaError::Parse(ParseErrors { - errors: invalid.errors, - }) - }) - } - - pub(crate) fn parse_compiler_schema( - sdl: &str, - ) -> Result, SchemaError> { - Self::parse_ast(sdl)? + let definitions = result + .map_err(|invalid| { + SchemaError::Parse(ParseErrors { + errors: invalid.errors, + }) + })? .to_schema_validate() - .map_err(|errors| SchemaError::Validate(errors.into())) - } - - pub(crate) fn parse(sdl: &str, config: &Configuration) -> Result { - let start = Instant::now(); - let definitions = Self::parse_compiler_schema(sdl)?; + .map_err(|errors| SchemaError::Validate(errors.into()))?; let mut subgraphs = HashMap::new(); // TODO: error if not found? 
@@ -111,7 +108,7 @@ impl Schema { let implementers_map = definitions.implementers_map(); let supergraph = Supergraph::from_schema(definitions)?; - let schema_id = Arc::new(Schema::schema_id(sdl)); + let schema_id = Arc::new(Schema::schema_id(&raw_sdl)); let api_schema = supergraph .to_api_schema(ApiSchemaOptions { @@ -125,7 +122,7 @@ impl Schema { })?; Ok(Schema { - raw_sdl: Arc::new(sdl.to_owned()), + raw_sdl, supergraph, subgraphs, implementers_map, diff --git a/apollo-router/src/state_machine.rs b/apollo-router/src/state_machine.rs index 0a141e1669..e3ce6c3a67 100644 --- a/apollo-router/src/state_machine.rs +++ b/apollo-router/src/state_machine.rs @@ -308,7 +308,7 @@ impl State { server_handle: &mut Option, previous_router_service_factory: Option<&FA::RouterFactory>, configuration: Arc, - schema: Arc, + sdl: Arc, license: LicenseState, listen_addresses_guard: &mut OwnedRwLockWriteGuard, mut all_connections_stopped_signals: Vec>, @@ -317,12 +317,12 @@ impl State { S: HttpServerFactory, FA: RouterSuperServiceFactory, { - let report = { - let ast = Schema::parse_ast(&schema) - .map_err(|e| ServiceCreationError(e.to_string().into()))?; - // Check the license - LicenseEnforcementReport::build(&configuration, &ast) - }; + let schema = Arc::new( + Schema::parse_arc(sdl.clone(), &configuration) + .map_err(|e| ServiceCreationError(e.to_string().into()))?, + ); + // Check the license + let report = LicenseEnforcementReport::build(&configuration, &schema); match license { LicenseState::Licensed => { @@ -362,7 +362,7 @@ impl State { .create( state_machine.is_telemetry_disabled, configuration.clone(), - schema.to_string(), + schema, previous_router_service_factory, None, ) @@ -422,7 +422,7 @@ impl State { Ok(Running { configuration, _metrics: metrics, - schema, + schema: sdl, license, server_handle: Some(server_handle), router_service_factory, @@ -1119,7 +1119,7 @@ mod tests { &'a mut self, is_telemetry_disabled: bool, configuration: Arc, - schema: String, + schema: Arc, 
previous_router_service_factory: Option<&'a MockMyRouterFactory>, extra_plugins: Option)>>, ) -> Result; diff --git a/apollo-router/src/test_harness.rs b/apollo-router/src/test_harness.rs index 7e921ffaf3..a0b5384489 100644 --- a/apollo-router/src/test_harness.rs +++ b/apollo-router/src/test_harness.rs @@ -34,6 +34,7 @@ use crate::services::subgraph; use crate::services::supergraph; use crate::services::HasSchema; use crate::services::SupergraphCreator; +use crate::spec::Schema; use crate::uplink::license_enforcement::LicenseState; /// Mocks for services the Apollo Router must integrate with. @@ -291,10 +292,11 @@ impl<'a> TestHarness<'a> { let config = builder.configuration.unwrap_or_default(); let canned_schema = include_str!("../testing_schema.graphql"); let schema = builder.schema.unwrap_or(canned_schema); + let schema = Arc::new(Schema::parse(schema, &config)?); let supergraph_creator = YamlRouterFactory .inner_create_supergraph( config.clone(), - schema.to_string(), + schema, None, None, Some(builder.extra_plugins), diff --git a/apollo-router/src/uplink/license_enforcement.rs b/apollo-router/src/uplink/license_enforcement.rs index 21ab68d970..2d77fb3683 100644 --- a/apollo-router/src/uplink/license_enforcement.rs +++ b/apollo-router/src/uplink/license_enforcement.rs @@ -11,9 +11,8 @@ use std::time::Duration; use std::time::SystemTime; use std::time::UNIX_EPOCH; -use apollo_compiler::ast::Definition; use apollo_compiler::schema::Directive; -use apollo_compiler::Node; +use apollo_compiler::schema::ExtendedType; use buildstructor::Builder; use displaydoc::Display; use itertools::Itertools; @@ -31,6 +30,7 @@ use thiserror::Error; use url::Url; use crate::plugins::authentication::convert_key_algorithm; +use crate::spec::Schema; use crate::spec::LINK_AS_ARGUMENT; use crate::spec::LINK_DIRECTIVE_NAME; use crate::spec::LINK_URL_ARGUMENT; @@ -101,7 +101,7 @@ struct ParsedLinkSpec { impl ParsedLinkSpec { fn from_link_directive( - link_directive: &Node, + 
link_directive: &Directive, ) -> Option> { link_directive .argument_by_name(LINK_URL_ARGUMENT) @@ -157,7 +157,7 @@ impl LicenseEnforcementReport { pub(crate) fn build( configuration: &Configuration, - schema: &apollo_compiler::ast::Document, + schema: &Schema, ) -> LicenseEnforcementReport { LicenseEnforcementReport { restricted_config_in_use: Self::validate_configuration( @@ -197,14 +197,14 @@ impl LicenseEnforcementReport { } fn validate_schema( - schema: &apollo_compiler::ast::Document, + schema: &Schema, schema_restrictions: &Vec, ) -> Vec { let link_specs = schema - .definitions - .iter() - .filter_map(|def| def.as_schema_definition()) - .flat_map(|def| def.directives.get_all(LINK_DIRECTIVE_NAME)) + .supergraph_schema() + .schema_definition + .directives + .get_all(LINK_DIRECTIVE_NAME) .filter_map(|link| { ParsedLinkSpec::from_link_directive(link).map(|maybe_spec| { maybe_spec.ok().map(|spec| (spec.spec_url.to_owned(), spec)) @@ -214,18 +214,8 @@ impl LicenseEnforcementReport { let mut schema_violations: Vec = Vec::new(); - for subgraph_url in schema - .definitions - .iter() - .filter_map(|def| def.as_enum_type_definition()) - .filter(|def| def.name == "join__Graph") - .flat_map(|def| def.values.iter()) - .flat_map(|val| val.directives.iter()) - .filter(|d| d.name == "join__graph") - .filter_map(|dir| (dir.arguments.iter().find(|arg| arg.name == "url"))) - .filter_map(|arg| arg.value.as_str()) - { - if subgraph_url.starts_with("unix://") { + for (_subgraph_name, subgraph_url) in schema.subgraphs() { + if subgraph_url.scheme_str() == Some("unix") { schema_violations.push(SchemaViolation::DirectiveArgument { url: "https://specs.apollo.dev/join/v0.3".to_string(), name: "join__Graph".to_string(), @@ -262,16 +252,19 @@ impl LicenseEnforcementReport { if version_req.matches(&link_spec.version) { let directive_name = link_spec.directive_name(name); if schema - .definitions - .iter() + .supergraph_schema() + .types + .values() .flat_map(|def| match def { // To 
traverse additional directive locations, add match arms for the respective definition types required. // As of writing this, this is only implemented for finding usages of progressive override on object type fields, but it can be extended to other directive locations trivially. - Definition::ObjectTypeDefinition(object_type_def) => { - let directives_on_object = - object_type_def.directives.get_all(&directive_name); + ExtendedType::Object(object_type_def) => { + let directives_on_object = object_type_def + .directives + .get_all(&directive_name) + .map(|component| &component.node); let directives_on_fields = - object_type_def.fields.iter().flat_map(|field| { + object_type_def.fields.values().flat_map(|field| { field.directives.get_all(&directive_name) }); @@ -682,9 +675,11 @@ mod test { use crate::uplink::license_enforcement::OneOrMany; use crate::Configuration; + #[track_caller] fn check(router_yaml: &str, supergraph_schema: &str) -> LicenseEnforcementReport { let config = Configuration::from_str(router_yaml).expect("router config must be valid"); - let schema = Schema::parse_ast(supergraph_schema).expect("supergraph schema must be valid"); + let schema = + Schema::parse(supergraph_schema, &config).expect("supergraph schema must be valid"); LicenseEnforcementReport::build(&config, &schema) } @@ -730,6 +725,7 @@ mod test { } #[test] + #[cfg(not(windows))] // http::uri::Uri parsing appears to reject unix:// on Windows fn test_restricted_unix_socket_via_schema() { let report = check( include_str!("testdata/oss.router.yaml"), diff --git a/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_in_range.graphql b/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_in_range.graphql index 563cc14c98..381d34ddd5 100644 --- a/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_in_range.graphql +++ b/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_in_range.graphql @@ -4,6 +4,51 @@ 
schema query: Query } + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + overrideLabel: String +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "products", url: "http://localhost:4001/") + SUBGRAPH2 @join__graph(name: "reviews", url: "http://localhost:4002/") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + type Query @join__type(graph: SUBGRAPH1) @join__type(graph: SUBGRAPH2) { t: T @join__field(graph: SUBGRAPH1) } diff --git a/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_out_of_range.graphql b/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_out_of_range.graphql index a62a24953a..586124f47a 100644 --- a/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_out_of_range.graphql +++ b/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_out_of_range.graphql @@ -4,6 +4,41 @@ schema query: Query } +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "products", url: "http://localhost:4001/") + SUBGRAPH2 @join__graph(name: "reviews", url: "http://localhost:4002/") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + type Query @join__type(graph: SUBGRAPH1) @join__type(graph: SUBGRAPH2) { t: T @join__field(graph: SUBGRAPH1) } diff --git a/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_in_range.graphql b/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_in_range.graphql index a265252bd4..ded63469ef 100644 --- a/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_in_range.graphql +++ b/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_in_range.graphql @@ -1,5 +1,26 @@ schema @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/authenticated/v0.1", for: SECURITY) { query: Query } + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +scalar link__Import + +type Query { + field: Int +} diff --git a/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_out_of_range.graphql b/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_out_of_range.graphql index 5266cfeb47..231cb51c48 100644 --- a/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_out_of_range.graphql +++ b/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_out_of_range.graphql @@ -1,5 +1,26 @@ schema @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/authenticated/v0.2", for: SECURITY) { query: Query } + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar link__Import + +type Query { + field: Int +} diff --git a/apollo-router/src/uplink/testdata/unix_socket.graphql b/apollo-router/src/uplink/testdata/unix_socket.graphql index 910a221a90..5ea6507f76 100644 --- a/apollo-router/src/uplink/testdata/unix_socket.graphql +++ b/apollo-router/src/uplink/testdata/unix_socket.graphql @@ -20,6 +20,11 @@ directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA +directive @authenticated on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM + +scalar federation__Scope +directive @requiresScopes(scopes: [[federation__Scope!]!]!) 
on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM + scalar join__FieldSet enum join__Graph { From 68c808acf450db724562e59218c55792b9bdd55d Mon Sep 17 00:00:00 2001 From: Nick Marsh Date: Wed, 31 Jul 2024 16:41:11 +1000 Subject: [PATCH 021/108] Fix enum reporting in deferred responses --- apollo-router/src/apollo_studio_interop/mod.rs | 7 +++---- apollo-router/src/apollo_studio_interop/tests.rs | 5 ++++- apollo-router/src/plugins/telemetry/mod.rs | 6 ++++-- apollo-router/src/services/execution/service.rs | 11 ++++++++--- 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/apollo-router/src/apollo_studio_interop/mod.rs b/apollo-router/src/apollo_studio_interop/mod.rs index e1571fa672..16b1a2f9ee 100644 --- a/apollo-router/src/apollo_studio_interop/mod.rs +++ b/apollo-router/src/apollo_studio_interop/mod.rs @@ -285,18 +285,17 @@ pub(crate) fn extract_enums_from_response( operation_name: Option<&str>, schema: &Valid, response_body: &Object, -) -> ReferencedEnums { - let mut result = ReferencedEnums::new(); + existing_refs: &mut ReferencedEnums, +) { if let Some(operation) = query.operation(operation_name) { extract_enums_from_selection_set( &operation.selection_set, &query.fragments, schema, response_body, - &mut result, + existing_refs, ); } - result } fn add_enum_value_to_map( diff --git a/apollo-router/src/apollo_studio_interop/tests.rs b/apollo-router/src/apollo_studio_interop/tests.rs index b6c1e03d22..34f457a196 100644 --- a/apollo-router/src/apollo_studio_interop/tests.rs +++ b/apollo-router/src/apollo_studio_interop/tests.rs @@ -130,12 +130,15 @@ fn enums_from_response( let query = Query::parse(query_str, operation_name, &schema, &config).unwrap(); let response_body: Object = serde_json::from_str(response_body_str).unwrap(); + let mut result = ReferencedEnums::new(); extract_enums_from_response( Arc::new(query), operation_name, schema.supergraph_schema(), &response_body, - ) + &mut result, + ); + result } #[test(tokio::test)] diff --git 
a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 42b34cbfae..8d934cf0e5 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -1526,11 +1526,12 @@ impl Telemetry { // If extended references or enums from responses are populated, we want to add them to the SingleStatsReport let extended_references = context .extensions() - .with_lock(|lock| lock.get::().cloned()) + .with_lock(|mut lock| lock.remove::()) .unwrap_or_default(); + // todo test extended references as well somehow let enum_response_references = context .extensions() - .with_lock(|lock| lock.get::().cloned()) + .with_lock(|mut lock| lock.remove::()) .unwrap_or_default(); SingleStatsReport { @@ -1594,6 +1595,7 @@ impl Telemetry { } } } else { + // todo is the issue here? // Usage reporting was missing, so it counts as one operation. SingleStatsReport { licensed_operation_count_by_type: LicensedOperationCountByType { diff --git a/apollo-router/src/services/execution/service.rs b/apollo-router/src/services/execution/service.rs index 1902fe80c6..10990f0c6b 100644 --- a/apollo-router/src/services/execution/service.rs +++ b/apollo-router/src/services/execution/service.rs @@ -291,6 +291,7 @@ impl ExecutionService { && response.data.is_none() && response.errors.is_empty() { + // todo: empty signature in this case? 
return response.into(); } @@ -353,16 +354,20 @@ impl ExecutionService { nullified_paths.extend(paths); - let referenced_enums = if let (ApolloMetricsReferenceMode::Extended, Some(Value::Object(response_body))) = (metrics_ref_mode, &response.data) { + let mut referenced_enums = context + .extensions() + .with_lock(|lock| lock.get::().cloned()) + .unwrap_or_default(); + if let (ApolloMetricsReferenceMode::Extended, Some(Value::Object(response_body))) = (metrics_ref_mode, &response.data) { extract_enums_from_response( query.clone(), operation_name, schema.api_schema(), response_body, + &mut referenced_enums, ) - } else { - ReferencedEnums::new() }; + context .extensions() .with_lock(|mut lock| lock.insert::(referenced_enums)); From d89359a9d75f9cb6cfa828126c5fec0053ed198a Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 31 Jul 2024 09:26:59 +0200 Subject: [PATCH 022/108] Entity cache redis configuration override (#5626) --- ...026-entity_cache_invalidation_preview.yaml | 6 + ...nfiguration__tests__schema_generation.snap | 10 +- ...nfiguration@entity_cache_preview.yaml.snap | 11 +- ...figuration@entity_cache_preview2.yaml.snap | 19 +++ .../testdata/metrics/entities.router.yaml | 9 +- .../migrations/entity_cache_preview2.yaml | 11 ++ apollo-router/src/plugins/cache/entity.rs | 108 ++++++++++++++---- .../src/plugins/cache/invalidation.rs | 66 +++++------ .../plugins/cache/invalidation_endpoint.rs | 22 ++-- apollo-router/src/plugins/cache/tests.rs | 2 + .../uplink/testdata/restricted.router.yaml | 6 +- apollo-router/tests/integration/redis.rs | 26 ++--- docs/source/configuration/entity-caching.mdx | 12 +- 13 files changed, 202 insertions(+), 106 deletions(-) create mode 100644 apollo-router/src/configuration/migrations/0026-entity_cache_invalidation_preview.yaml create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview2.yaml.snap create mode 100644 
apollo-router/src/configuration/testdata/migrations/entity_cache_preview2.yaml diff --git a/apollo-router/src/configuration/migrations/0026-entity_cache_invalidation_preview.yaml b/apollo-router/src/configuration/migrations/0026-entity_cache_invalidation_preview.yaml new file mode 100644 index 0000000000..60a9a83604 --- /dev/null +++ b/apollo-router/src/configuration/migrations/0026-entity_cache_invalidation_preview.yaml @@ -0,0 +1,6 @@ +description: Entity cache preview configuration format is changing +actions: + - type: move + from: preview_entity_cache.redis + to: preview_entity_cache.subgraph.all.redis + diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index a702a933cd..6e925407a6 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1615,17 +1615,12 @@ expression: "&schema" "$ref": "#/definitions/Metrics", "description": "#/definitions/Metrics" }, - "redis": { - "$ref": "#/definitions/RedisCache", - "description": "#/definitions/RedisCache" - }, "subgraph": { "$ref": "#/definitions/SubgraphConfiguration_for_Subgraph", "description": "#/definitions/SubgraphConfiguration_for_Subgraph" } }, "required": [ - "redis", "subgraph" ], "type": "object" @@ -5609,6 +5604,11 @@ expression: "&schema" "nullable": true, "type": "string" }, + "redis": { + "$ref": "#/definitions/RedisCache", + "description": "#/definitions/RedisCache", + "nullable": true + }, "ttl": { "$ref": "#/definitions/Ttl", "description": "#/definitions/Ttl", diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap 
b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap index 5544788d20..5fda93d394 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap @@ -4,11 +4,6 @@ expression: new_config --- --- preview_entity_cache: - redis: - urls: - - "redis://localhost:6379" - timeout: 5ms - ttl: 60s enabled: true invalidation: listen: "127.0.0.1:4000" @@ -19,3 +14,9 @@ preview_entity_cache: enabled: false products: ttl: 120s + all: + redis: + urls: + - "redis://localhost:6379" + timeout: 5ms + ttl: 60s diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview2.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview2.yaml.snap new file mode 100644 index 0000000000..3fbf236aaa --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview2.yaml.snap @@ -0,0 +1,19 @@ +--- +source: apollo-router/src/configuration/tests.rs +expression: new_config +--- +--- +preview_entity_cache: + enabled: true + subgraph: + subgraphs: + accounts: + enabled: false + products: + ttl: 120s + all: + redis: + urls: + - "redis://localhost:6379" + timeout: 5ms + ttl: 60s diff --git a/apollo-router/src/configuration/testdata/metrics/entities.router.yaml b/apollo-router/src/configuration/testdata/metrics/entities.router.yaml index 0c886c2d64..e2cbd0ee04 100644 --- a/apollo-router/src/configuration/testdata/metrics/entities.router.yaml +++ b/apollo-router/src/configuration/testdata/metrics/entities.router.yaml @@ -1,14 +1,15 @@ preview_entity_cache: enabled: 
false - redis: - urls: [ "redis://localhost:6379" ] - timeout: 5ms - ttl: 60s invalidation: listen: 127.0.0.1:4000 path: /invalidation subgraph: all: + redis: + urls: [ "redis://localhost:6379" ] + timeout: 5ms + ttl: 60s + enabled: true subgraphs: accounts: diff --git a/apollo-router/src/configuration/testdata/migrations/entity_cache_preview2.yaml b/apollo-router/src/configuration/testdata/migrations/entity_cache_preview2.yaml new file mode 100644 index 0000000000..2539a571ce --- /dev/null +++ b/apollo-router/src/configuration/testdata/migrations/entity_cache_preview2.yaml @@ -0,0 +1,11 @@ +preview_entity_cache: + redis: + urls: [ "redis://localhost:6379" ] + timeout: 5ms + ttl: 60s + enabled: true + subgraphs: + accounts: + enabled: false + products: + ttl: 120s \ No newline at end of file diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index 7332ca0d10..1bdb3c6614 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -66,7 +66,7 @@ register_plugin!("apollo", "preview_entity_cache", EntityCache); #[derive(Clone)] pub(crate) struct EntityCache { - storage: Option, + storage: Arc, endpoint_config: Option>, subgraphs: Arc>, entity_type: Option, @@ -76,11 +76,21 @@ pub(crate) struct EntityCache { pub(crate) invalidation: Invalidation, } +pub(crate) struct Storage { + all: Option, + subgraphs: HashMap, +} + +impl Storage { + pub(crate) fn get(&self, subgraph: &str) -> Option<&RedisCacheStorage> { + self.subgraphs.get(subgraph).or(self.all.as_ref()) + } +} + /// Configuration for entity caching #[derive(Clone, Debug, JsonSchema, Deserialize)] #[serde(rename_all = "snake_case", deny_unknown_fields)] pub(crate) struct Config { - redis: RedisCache, /// Enable or disable the entity caching feature #[serde(default)] enabled: bool, @@ -100,6 +110,9 @@ pub(crate) struct Config { #[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)] #[serde(rename_all = "snake_case", 
deny_unknown_fields, default)] pub(crate) struct Subgraph { + /// Redis configuration + pub(crate) redis: Option, + /// expiration for all keys for this subgraph, unless overriden by the `Cache-Control` header in subgraph responses pub(crate) ttl: Option, @@ -116,6 +129,7 @@ pub(crate) struct Subgraph { impl Default for Subgraph { fn default() -> Self { Self { + redis: None, enabled: true, ttl: Default::default(), private_id: Default::default(), @@ -172,26 +186,64 @@ impl Plugin for EntityCache { .query .as_ref() .map(|q| q.name.to_string()); - let required_to_start = init.config.redis.required_to_start; - // we need to explicitely disable TTL reset because it is managed directly by this plugin - let mut redis_config = init.config.redis.clone(); - redis_config.reset_ttl = false; - let storage = match RedisCacheStorage::new(redis_config).await { - Ok(storage) => Some(storage), - Err(e) => { - tracing::error!( - cache = "entity", - e, - "could not open connection to Redis for caching", - ); - if required_to_start { - return Err(e); + + let mut all = None; + + if let Some(redis) = &init.config.subgraph.all.redis { + let mut redis_config = redis.clone(); + let required_to_start = redis_config.required_to_start; + // we need to explicitely disable TTL reset because it is managed directly by this plugin + redis_config.reset_ttl = false; + all = match RedisCacheStorage::new(redis_config).await { + Ok(storage) => Some(storage), + Err(e) => { + tracing::error!( + cache = "entity", + e, + "could not open connection to Redis for caching", + ); + if required_to_start { + return Err(e); + } + None + } + }; + } + let mut subgraph_storages = HashMap::new(); + for (subgraph, config) in &init.config.subgraph.subgraphs { + if let Some(redis) = &config.redis { + let required_to_start = redis.required_to_start; + // we need to explicitely disable TTL reset because it is managed directly by this plugin + let mut redis_config = redis.clone(); + redis_config.reset_ttl = false; + let 
storage = match RedisCacheStorage::new(redis_config).await { + Ok(storage) => Some(storage), + Err(e) => { + tracing::error!( + cache = "entity", + e, + "could not open connection to Redis for caching", + ); + if required_to_start { + return Err(e); + } + None + } + }; + if let Some(storage) = storage { + subgraph_storages.insert(subgraph.clone(), storage); } - None } - }; + } - if init.config.redis.ttl.is_none() + if init + .config + .subgraph + .all + .redis + .as_ref() + .map(|r| r.ttl.is_none()) + .unwrap_or(false) && init .config .subgraph @@ -220,6 +272,11 @@ impl Plugin for EntityCache { ); } + let storage = Arc::new(Storage { + all, + subgraphs: subgraph_storages, + }); + let invalidation = Invalidation::new(storage.clone()).await?; Ok(Self { @@ -256,8 +313,8 @@ impl Plugin for EntityCache { name: &str, mut service: subgraph::BoxService, ) -> subgraph::BoxService { - let storage = match self.storage.clone() { - Some(storage) => storage, + let storage = match self.storage.get(name) { + Some(storage) => storage.clone(), None => { return ServiceBuilder::new() .map_response(move |response: subgraph::Response| { @@ -386,9 +443,14 @@ impl EntityCache { use std::net::Ipv4Addr; use std::net::SocketAddr; - let invalidation = Invalidation::new(Some(storage.clone())).await?; + let storage = Arc::new(Storage { + all: Some(storage), + subgraphs: HashMap::new(), + }); + let invalidation = Invalidation::new(storage.clone()).await?; + Ok(Self { - storage: Some(storage), + storage, entity_type: None, enabled: true, subgraphs: Arc::new(SubgraphConfiguration { diff --git a/apollo-router/src/plugins/cache/invalidation.rs b/apollo-router/src/plugins/cache/invalidation.rs index 4e8e5d5204..a2c2bc80d6 100644 --- a/apollo-router/src/plugins/cache/invalidation.rs +++ b/apollo-router/src/plugins/cache/invalidation.rs @@ -1,3 +1,4 @@ +use std::sync::Arc; use std::time::Instant; use fred::error::RedisError; @@ -13,6 +14,7 @@ use tokio::sync::broadcast; use tower::BoxError; use 
tracing::Instrument; +use super::entity::Storage as EntityStorage; use crate::cache::redis::RedisCacheStorage; use crate::cache::redis::RedisKey; use crate::notification::Handle; @@ -23,7 +25,6 @@ use crate::Notify; #[derive(Clone)] pub(crate) struct Invalidation { - pub(super) enabled: bool, #[allow(clippy::type_complexity)] pub(super) handle: Handle< InvalidationTopic, @@ -71,18 +72,16 @@ pub(crate) enum InvalidationOrigin { } impl Invalidation { - pub(crate) async fn new(storage: Option) -> Result { + pub(crate) async fn new(storage: Arc) -> Result { let mut notify = Notify::new(None, None, None); let (handle, _b) = notify.create_or_subscribe(InvalidationTopic, false).await?; - let enabled = storage.is_some(); - if let Some(storage) = storage.clone() { - let h = handle.clone(); - tokio::task::spawn(async move { - start(storage, h.into_stream()).await; - }); - } - Ok(Self { enabled, handle }) + let h = handle.clone(); + + tokio::task::spawn(async move { + start(storage, h.into_stream()).await; + }); + Ok(Self { handle }) } pub(crate) async fn invalidate( @@ -90,35 +89,31 @@ impl Invalidation { origin: InvalidationOrigin, requests: Vec, ) -> Result { - if self.enabled { - let mut sink = self.handle.clone().into_sink(); - let (response_tx, mut response_rx) = broadcast::channel(2); - sink.send((requests, origin, response_tx.clone())) - .await - .map_err(|e| format!("cannot send invalidation request: {}", e.message))?; + let mut sink = self.handle.clone().into_sink(); + let (response_tx, mut response_rx) = broadcast::channel(2); + sink.send((requests, origin, response_tx.clone())) + .await + .map_err(|e| format!("cannot send invalidation request: {}", e.message))?; - let result = response_rx - .recv() - .await - .map_err(|err| { - format!( - "cannot receive response for invalidation request: {:?}", - err - ) - })? 
- .map_err(|err| format!("received an invalidation error: {:?}", err))?; + let result = response_rx + .recv() + .await + .map_err(|err| { + format!( + "cannot receive response for invalidation request: {:?}", + err + ) + })? + .map_err(|err| format!("received an invalidation error: {:?}", err))?; - Ok(result) - } else { - Ok(0) - } + Ok(result) } } // TODO refactor #[allow(clippy::type_complexity)] async fn start( - storage: RedisCacheStorage, + storage: Arc, mut handle: HandleStream< InvalidationTopic, ( @@ -139,6 +134,7 @@ async fn start( 1u64, "origin" = origin ); + if let Err(err) = response_tx.send( handle_request_batch(&storage, origin, requests) .instrument(tracing::info_span!( @@ -218,7 +214,7 @@ async fn handle_request( } async fn handle_request_batch( - storage: &RedisCacheStorage, + storage: &EntityStorage, origin: &'static str, requests: Vec, ) -> Result { @@ -226,7 +222,11 @@ async fn handle_request_batch( let mut errors = Vec::new(); for request in requests { let start = Instant::now(); - match handle_request(storage, origin, &request) + let redis_storage = match storage.get(request.subgraph_name()) { + Some(s) => s, + None => continue, + }; + match handle_request(redis_storage, origin, &request) .instrument(tracing::info_span!("cache.invalidation.request")) .await { diff --git a/apollo-router/src/plugins/cache/invalidation_endpoint.rs b/apollo-router/src/plugins/cache/invalidation_endpoint.rs index 424751c830..561309c8fb 100644 --- a/apollo-router/src/plugins/cache/invalidation_endpoint.rs +++ b/apollo-router/src/plugins/cache/invalidation_endpoint.rs @@ -227,14 +227,12 @@ mod tests { .create_or_subscribe(InvalidationTopic, false) .await .unwrap(); - let invalidation = Invalidation { - enabled: true, - handle, - }; + let invalidation = Invalidation { handle }; let config = Arc::new(SubgraphConfiguration { all: Subgraph { ttl: None, enabled: true, + redis: None, private_id: None, invalidation: Some(SubgraphInvalidationConfig { enabled: true, @@ 
-319,13 +317,13 @@ mod tests { }); let invalidation = Invalidation { - enabled: true, handle: handle.clone(), }; let config = Arc::new(SubgraphConfiguration { all: Subgraph { ttl: None, enabled: true, + redis: None, private_id: None, invalidation: Some(SubgraphInvalidationConfig { enabled: true, @@ -336,6 +334,7 @@ mod tests { String::from("test"), Subgraph { ttl: None, + redis: None, enabled: true, private_id: None, invalidation: Some(SubgraphInvalidationConfig { @@ -421,14 +420,12 @@ mod tests { .create_or_subscribe(InvalidationTopic, false) .await .unwrap(); - let invalidation = Invalidation { - enabled: true, - handle, - }; + let invalidation = Invalidation { handle }; let config = Arc::new(SubgraphConfiguration { all: Subgraph { ttl: None, enabled: true, + redis: None, private_id: None, invalidation: Some(SubgraphInvalidationConfig { enabled: true, @@ -440,6 +437,7 @@ mod tests { Subgraph { ttl: None, enabled: true, + redis: None, private_id: None, invalidation: Some(SubgraphInvalidationConfig { enabled: true, @@ -520,15 +518,13 @@ mod tests { assert!(called); }); - let invalidation = Invalidation { - enabled: true, - handle, - }; + let invalidation = Invalidation { handle }; let config = Arc::new(SubgraphConfiguration { all: Subgraph { ttl: None, enabled: true, private_id: None, + redis: None, invalidation: Some(SubgraphInvalidationConfig { enabled: true, shared_key: String::from("test"), diff --git a/apollo-router/src/plugins/cache/tests.rs b/apollo-router/src/plugins/cache/tests.rs index 8af136c0c9..f5cb713473 100644 --- a/apollo-router/src/plugins/cache/tests.rs +++ b/apollo-router/src/plugins/cache/tests.rs @@ -398,6 +398,7 @@ async fn private() { ( "user".to_string(), Subgraph { + redis: None, private_id: Some("sub".to_string()), enabled: true, ttl: None, @@ -407,6 +408,7 @@ async fn private() { ( "orga".to_string(), Subgraph { + redis: None, private_id: Some("sub".to_string()), enabled: true, ttl: None, diff --git 
a/apollo-router/src/uplink/testdata/restricted.router.yaml b/apollo-router/src/uplink/testdata/restricted.router.yaml index 278e6134c8..67c50cac7d 100644 --- a/apollo-router/src/uplink/testdata/restricted.router.yaml +++ b/apollo-router/src/uplink/testdata/restricted.router.yaml @@ -57,11 +57,11 @@ preview_entity_cache: invalidation: listen: 127.0.0.1:4000 path: /invalidation - redis: - urls: - - https://example.com subgraph: all: + redis: + urls: + - https://example.com enabled: false subgraphs: product: diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index e0cfc0037e..cb8b79959e 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -360,17 +360,17 @@ async fn entity_cache() -> Result<(), BoxError> { .configuration_json(json!({ "preview_entity_cache": { "enabled": true, - "redis": { - "urls": ["redis://127.0.0.1:6379"], - "ttl": "2s" - }, "invalidation": { "listen": "127.0.0.1:4000", "path": "/invalidation" }, "subgraph": { "all": { - "enabled": false + "enabled": false, + "redis": { + "urls": ["redis://127.0.0.1:6379"], + "ttl": "2s" + }, }, "subgraphs": { "products": { @@ -474,10 +474,6 @@ async fn entity_cache() -> Result<(), BoxError> { .configuration_json(json!({ "preview_entity_cache": { "enabled": true, - "redis": { - "urls": ["redis://127.0.0.1:6379"], - "ttl": "2s" - }, "invalidation": { "listen": "127.0.0.1:4000", "path": "/invalidation" @@ -485,6 +481,10 @@ async fn entity_cache() -> Result<(), BoxError> { "subgraph": { "all": { "enabled": false, + "redis": { + "urls": ["redis://127.0.0.1:6379"], + "ttl": "2s" + }, }, "subgraphs": { "products": { @@ -681,10 +681,6 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { .configuration_json(json!({ "preview_entity_cache": { "enabled": true, - "redis": { - "urls": ["redis://127.0.0.1:6379"], - "ttl": "2s" - }, "invalidation": { "listen": "127.0.0.1:4000", "path": "/invalidation" @@ -692,6 +688,10 
@@ async fn entity_cache_authorization() -> Result<(), BoxError> { "subgraph": { "all": { "enabled": false, + "redis": { + "urls": ["redis://127.0.0.1:6379"], + "ttl": "2s" + }, }, "subgraphs": { "products": { diff --git a/docs/source/configuration/entity-caching.mdx b/docs/source/configuration/entity-caching.mdx index 655d61dbcf..093ca7b947 100644 --- a/docs/source/configuration/entity-caching.mdx +++ b/docs/source/configuration/entity-caching.mdx @@ -85,16 +85,14 @@ For example: # Enable entity caching globally preview_entity_cache: enabled: true - - # Configure Redis - redis: - urls: ["redis://..."] - timeout: 5ms # Optional, by default: 2ms - ttl: 24h # Optional, by default no expiration - subgraph: all: enabled: true + # Configure Redis + redis: + urls: ["redis://..."] + timeout: 5ms # Optional, by default: 2ms + ttl: 24h # Optional, by default no expiration # Configure entity caching per subgraph, overrides options from the "all" section subgraphs: products: From 2b014dfe4b3ec507b2d5020ba1e81508d526aa33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Wed, 31 Jul 2024 14:59:46 +0200 Subject: [PATCH 023/108] fix: update apollo-compiler (#5753) --- .changesets/fix_renee_operation_variables.md | 13 +++++++ Cargo.lock | 39 ++++--------------- Cargo.toml | 6 +-- .../handles_operations_with_directives.rs | 2 +- examples/supergraph-sdl/rust/Cargo.toml | 2 +- fuzz/Cargo.toml | 4 +- 6 files changed, 28 insertions(+), 38 deletions(-) create mode 100644 .changesets/fix_renee_operation_variables.md diff --git a/.changesets/fix_renee_operation_variables.md b/.changesets/fix_renee_operation_variables.md new file mode 100644 index 0000000000..99a795a190 --- /dev/null +++ b/.changesets/fix_renee_operation_variables.md @@ -0,0 +1,13 @@ +### Fix GraphQL query directives validation bug ([PR #5753](https://github.com/apollographql/router/pull/5753)) + +GraphQL supports an obscure syntax, where a variable is used in a directive application on the same operation where 
the variable is declared. + +The router used to reject queries like this, but now they are accepted: + +```graphql +query GetSomething($var: Int!) @someDirective(argument: $var) { + something +} +``` + +By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5753 diff --git a/Cargo.lock b/Cargo.lock index ef632245d5..b66ac53f6e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -395,9 +395,9 @@ checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "apollo-compiler" -version = "1.0.0-beta.19" +version = "1.0.0-beta.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b21b81064ebf506f5a4073f5ef7a3a9cfdba29904814fa3f42612b9055b37f2" +checksum = "07961541ebb5c85cc02ea0f08357e31b30537674bbca818884f1fc658fa99116" dependencies = [ "ahash", "apollo-parser", @@ -412,16 +412,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "apollo-encoder" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee9f27b20841d14923dd5f0714a79f86360b23492d2f98ab5d1651471a56b7a4" -dependencies = [ - "apollo-parser", - "thiserror", -] - [[package]] name = "apollo-federation" version = "1.52.0" @@ -464,9 +454,9 @@ dependencies = [ [[package]] name = "apollo-parser" -version = "0.7.7" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb7c8a9776825e5524b5ab3a7f478bf091a054180f244dff85814452cb87d90" +checksum = "f17a43dc64e71ca7140e646b99bf86ae721ebb801d2aec44e29a654c4d035ab8" dependencies = [ "memchr", "rowan", @@ -647,7 +637,7 @@ version = "1.52.0" dependencies = [ "apollo-parser", "apollo-router", - "apollo-smith 0.5.0", + "apollo-smith", "arbitrary", "criterion", "memory-stats", @@ -691,22 +681,9 @@ dependencies = [ [[package]] name = "apollo-smith" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"441a51f1055d2eebcda41b55066de925502e11c97097c6d1bab0da5bdeb5c70f" -dependencies = [ - "apollo-encoder", - "apollo-parser", - "arbitrary", - "once_cell", - "thiserror", -] - -[[package]] -name = "apollo-smith" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae8c0ec27715028b24a0a98ac53e88ac4a980e6d519cdb37265d2f2c76c864a" +checksum = "84ef0a8fba05f32a14d03eb3ff74f556cecca820012d5846770b839c75332b38" dependencies = [ "apollo-compiler", "apollo-parser", @@ -6144,7 +6121,7 @@ dependencies = [ "apollo-compiler", "apollo-parser", "apollo-router", - "apollo-smith 0.9.0", + "apollo-smith", "async-trait", "env_logger 0.10.2", "http 0.2.12", diff --git a/Cargo.toml b/Cargo.toml index 760349ac52..7c5fe5a189 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,9 +49,9 @@ debug = 1 # Dependencies used in more than one place are specified here in order to keep versions in sync: # https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table [workspace.dependencies] -apollo-compiler = "=1.0.0-beta.19" -apollo-parser = "0.7.6" -apollo-smith = { version = "0.5.0", features = ["parser-impl"] } +apollo-compiler = "=1.0.0-beta.20" +apollo-parser = "0.8.0" +apollo-smith = "0.10.0" async-trait = "0.1.77" hex = { version = "0.4.3", features = ["serde"] } http = "0.2.11" diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs index 986c43a0d2..2f344104fc 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs @@ -244,7 +244,7 @@ fn test_if_directives_with_arguments_applied_on_queries_are_ok() { } #[test] -#[should_panic(expected = r#"unused variable: `$some_var`"#)] +#[should_panic(expected = r#"snapshot 
assertion"#)] // TODO: investigate this failure fn subgraph_query_retains_the_query_variables_used_in_the_directives_applied_to_the_query() { let planner = planner!( diff --git a/examples/supergraph-sdl/rust/Cargo.toml b/examples/supergraph-sdl/rust/Cargo.toml index 2326e049e7..18e0c2d85c 100644 --- a/examples/supergraph-sdl/rust/Cargo.toml +++ b/examples/supergraph-sdl/rust/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] anyhow = "1" -apollo-compiler = "=1.0.0-beta.19" +apollo-compiler = "=1.0.0-beta.20" apollo-router = { path = "../../../apollo-router" } async-trait = "0.1" tower = { version = "0.4", features = ["full"] } diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index 928b68ed73..451ea09375 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -12,8 +12,8 @@ cargo-fuzz = true [dependencies] libfuzzer-sys = "0.4" apollo-compiler.workspace = true -apollo-parser = "0.7.6" -apollo-smith = "0.9.0" +apollo-parser.workspace = true +apollo-smith.workspace = true env_logger = "0.10.2" log = "0.4" reqwest = { workspace = true, features = ["json", "blocking"] } From 2ea66c2f17984fd1567fde5983110ff0fb950e22 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Wed, 31 Jul 2024 18:24:37 +0200 Subject: [PATCH 024/108] Allow to use progressive override when using federation 2.7 and above. 
(#5754) --- .changesets/fix_customer_snore_infant_wrap.md | 5 ++ .../src/plugins/progressive_override/mod.rs | 2 +- .../src/plugins/progressive_override/tests.rs | 47 +++++++++++++++++++ 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 .changesets/fix_customer_snore_infant_wrap.md diff --git a/.changesets/fix_customer_snore_infant_wrap.md b/.changesets/fix_customer_snore_infant_wrap.md new file mode 100644 index 0000000000..ead868d4de --- /dev/null +++ b/.changesets/fix_customer_snore_infant_wrap.md @@ -0,0 +1,5 @@ +### Allow to use progressive override with federation 2.7 and above ([PR #5754](https://github.com/apollographql/router/pull/5754)) + +The progressive override feature is now properly available using federation 2.7 and above. + +By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/5754 diff --git a/apollo-router/src/plugins/progressive_override/mod.rs b/apollo-router/src/plugins/progressive_override/mod.rs index bcbf462afd..542b0d0722 100644 --- a/apollo-router/src/plugins/progressive_override/mod.rs +++ b/apollo-router/src/plugins/progressive_override/mod.rs @@ -29,7 +29,7 @@ pub(crate) const LABELS_TO_OVERRIDE_KEY: &str = "apollo_override::labels_to_over pub(crate) const JOIN_FIELD_DIRECTIVE_NAME: &str = "join__field"; pub(crate) const JOIN_SPEC_BASE_URL: &str = "https://specs.apollo.dev/join"; -pub(crate) const JOIN_SPEC_VERSION_RANGE: &str = ">=0.4.0, <=0.4.0"; +pub(crate) const JOIN_SPEC_VERSION_RANGE: &str = ">=0.4"; pub(crate) const OVERRIDE_LABEL_ARG_NAME: &str = "overrideLabel"; /// Configuration for the progressive override plugin diff --git a/apollo-router/src/plugins/progressive_override/tests.rs b/apollo-router/src/plugins/progressive_override/tests.rs index d8b3cb31af..0cead42fd9 100644 --- a/apollo-router/src/plugins/progressive_override/tests.rs +++ b/apollo-router/src/plugins/progressive_override/tests.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use apollo_compiler::Schema; 
use tower::ServiceExt; use crate::metrics::FutureMetricsExt; @@ -9,6 +10,9 @@ use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::plugins::progressive_override::Config; use crate::plugins::progressive_override::ProgressiveOverridePlugin; +use crate::plugins::progressive_override::JOIN_FIELD_DIRECTIVE_NAME; +use crate::plugins::progressive_override::JOIN_SPEC_BASE_URL; +use crate::plugins::progressive_override::JOIN_SPEC_VERSION_RANGE; use crate::plugins::progressive_override::LABELS_TO_OVERRIDE_KEY; use crate::plugins::progressive_override::UNRESOLVED_LABELS_KEY; use crate::services::layers::query_analysis::ParsedDocument; @@ -22,6 +26,49 @@ use crate::TestHarness; const SCHEMA: &str = include_str!("testdata/supergraph.graphql"); const SCHEMA_NO_USAGES: &str = include_str!("testdata/supergraph_no_usages.graphql"); +#[test] +fn test_progressive_overrides_are_recognised_vor_join_v0_4_and_above() { + let schema_for_version = |version| { + format!( + r#"schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/{}", for: EXECUTION) + @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) + + directive @join__field repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION"#, + version + ) + }; + + let join_v3_schema = Schema::parse(schema_for_version("v0.3"), "test").unwrap(); + assert!(crate::spec::Schema::directive_name( + &join_v3_schema, + JOIN_SPEC_BASE_URL, + JOIN_SPEC_VERSION_RANGE, + JOIN_FIELD_DIRECTIVE_NAME, + ) + .is_none()); + + let join_v4_schema = Schema::parse(schema_for_version("v0.4"), "test").unwrap(); + assert!(crate::spec::Schema::directive_name( + &join_v4_schema, + JOIN_SPEC_BASE_URL, + JOIN_SPEC_VERSION_RANGE, + JOIN_FIELD_DIRECTIVE_NAME, + ) + .is_some()); + + let join_v5_schema = Schema::parse(schema_for_version("v0.5"), "test").unwrap(); + + assert!(crate::spec::Schema::directive_name( + &join_v5_schema, + JOIN_SPEC_BASE_URL, + JOIN_SPEC_VERSION_RANGE, + 
JOIN_FIELD_DIRECTIVE_NAME, + ) + .is_some()) +} + #[tokio::test] async fn plugin_disables_itself_with_no_progressive_override_usages() { let plugin = ProgressiveOverridePlugin::new(PluginInit::fake_new( From 0696de67d33a78cefc05b241de8764b43bfe53d1 Mon Sep 17 00:00:00 2001 From: Nick Marsh Date: Thu, 1 Aug 2024 14:44:50 +1000 Subject: [PATCH 025/108] Remove todos and add comment --- apollo-router/src/plugins/telemetry/mod.rs | 6 +++--- apollo-router/src/services/execution/service.rs | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 8d934cf0e5..f348e2969c 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -1526,9 +1526,10 @@ impl Telemetry { // If extended references or enums from responses are populated, we want to add them to the SingleStatsReport let extended_references = context .extensions() - .with_lock(|mut lock| lock.remove::()) + .with_lock(|lock| lock.get::().cloned()) .unwrap_or_default(); - // todo test extended references as well somehow + // Clear the enum values from responses when we send them in a report so that we properly report enum response + // values for deferred responses and subscriptions. let enum_response_references = context .extensions() .with_lock(|mut lock| lock.remove::()) @@ -1595,7 +1596,6 @@ impl Telemetry { } } } else { - // todo is the issue here? // Usage reporting was missing, so it counts as one operation. 
SingleStatsReport { licensed_operation_count_by_type: LicensedOperationCountByType { diff --git a/apollo-router/src/services/execution/service.rs b/apollo-router/src/services/execution/service.rs index 10990f0c6b..9486e50ec1 100644 --- a/apollo-router/src/services/execution/service.rs +++ b/apollo-router/src/services/execution/service.rs @@ -291,7 +291,6 @@ impl ExecutionService { && response.data.is_none() && response.errors.is_empty() { - // todo: empty signature in this case? return response.into(); } From c2231ce55b2620e25b5960314561c1cb5ecb1ac1 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Thu, 1 Aug 2024 10:30:10 +0200 Subject: [PATCH 026/108] HTTP service fixes (#5449) --- apollo-router/src/router_factory.rs | 3 +-- apollo-router/src/services/http.rs | 12 +++++++----- docs/source/configuration/traffic-shaping.mdx | 6 +++--- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 5779215a5b..9c3388a963 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -444,8 +444,7 @@ pub(crate) async fn create_subgraph_services( shaping.enable_subgraph_http2(name), )?; - let http_service_factory = - HttpClientServiceFactory::new(Arc::new(http_service), plugins.clone()); + let http_service_factory = HttpClientServiceFactory::new(http_service, plugins.clone()); let subgraph_service = shaping.subgraph_service_internal( name, diff --git a/apollo-router/src/services/http.rs b/apollo-router/src/services/http.rs index 980c20ce70..1fcd4dd701 100644 --- a/apollo-router/src/services/http.rs +++ b/apollo-router/src/services/http.rs @@ -33,12 +33,12 @@ pub(crate) struct HttpResponse { #[derive(Clone)] pub(crate) struct HttpClientServiceFactory { - pub(crate) service: Arc, + pub(crate) service: HttpClientService, pub(crate) plugins: Arc, } impl HttpClientServiceFactory { - pub(crate) fn new(service: Arc, plugins: Arc) -> Self { + pub(crate) fn 
new(service: HttpClientService, plugins: Arc) -> Self { HttpClientServiceFactory { service, plugins } } @@ -59,17 +59,19 @@ impl HttpClientServiceFactory { .unwrap(); HttpClientServiceFactory { - service: Arc::new(service), + service, plugins: Arc::new(IndexMap::default()), } } pub(crate) fn create(&self, name: &str) -> BoxService { - let service = self.service.make(); + let service = self.service.clone(); self.plugins .iter() .rev() - .fold(service, |acc, (_, e)| e.http_client_service(name, acc)) + .fold(service.boxed(), |acc, (_, e)| { + e.http_client_service(name, acc) + }) } } diff --git a/docs/source/configuration/traffic-shaping.mdx b/docs/source/configuration/traffic-shaping.mdx index dfeb0cb5e1..c6d027fbca 100644 --- a/docs/source/configuration/traffic-shaping.mdx +++ b/docs/source/configuration/traffic-shaping.mdx @@ -192,9 +192,9 @@ Traffic shaping always executes these steps in the same order, to ensure a consi - preparing the subgraph request - variable deduplication -- rate limiting -- request retry -- timeout - query deduplication +- timeout +- request retry +- rate limiting - compression - sending the request to the subgraph From a201ae6a60dc5c9a32706e6b19fa28856af028fa Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Thu, 1 Aug 2024 10:50:43 +0200 Subject: [PATCH 027/108] mark the docs team as owner of .changesets (#5748) --- .github/CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d7c12aae40..6d9f87ef81 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,4 +1,5 @@ /docs/ @apollographql/docs +/.changesets/ @apollographql/docs /apollo-federation/ @dariuszkuc @sachindshinde @goto-bus-stop @SimonSapin @lrlna @TylerBloom @duckki /apollo-federation/src/sources/connect/json_selection @benjamn /apollo-router/ @apollographql/polaris @apollographql/atlas From 49a6d7bbfaa892d09d23648c167fb722841b43b0 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Thu, 1 Aug 2024 16:17:44 +0200 
Subject: [PATCH 028/108] add support of other format for trace id (#5735) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .changesets/feat_bnjjj_feat_417.md | 27 ++++++++ ...nfiguration__tests__schema_generation.snap | 49 +++++++------- apollo-router/src/plugins/telemetry/config.rs | 25 +++++++- .../plugins/telemetry/config_new/logging.rs | 42 ++++++++++-- .../plugins/telemetry/config_new/selectors.rs | 51 +++++++++------ .../src/plugins/telemetry/formatters/json.rs | 25 +++++++- .../src/plugins/telemetry/formatters/text.rs | 21 +++++- apollo-router/src/plugins/telemetry/mod.rs | 6 +- apollo-router/tests/common.rs | 3 +- .../telemetry/fixtures/json.uuid.router.yaml | 30 +++++++++ .../telemetry/fixtures/text.uuid.router.yaml | 31 +++++++++ .../tests/integration/telemetry/logging.rs | 64 +++++++++++++++++++ 12 files changed, 321 insertions(+), 53 deletions(-) create mode 100644 .changesets/feat_bnjjj_feat_417.md create mode 100644 apollo-router/tests/integration/telemetry/fixtures/json.uuid.router.yaml create mode 100644 apollo-router/tests/integration/telemetry/fixtures/text.uuid.router.yaml diff --git a/.changesets/feat_bnjjj_feat_417.md b/.changesets/feat_bnjjj_feat_417.md new file mode 100644 index 0000000000..d4aa827ccd --- /dev/null +++ b/.changesets/feat_bnjjj_feat_417.md @@ -0,0 +1,27 @@ +### Add support of other format for trace id in telemetry ([PR #5735](https://github.com/apollographql/router/pull/5735)) + +Currently we support datadog and otel traceID formats and decimal. However we would like to also support UUID. + +Unify the two `TraceIdFormat` enums into a single enum that us used across selectors and experimental_expose_trace id. 
+ +Ensure the following formats are supported: + ++ open_telemetry ++ hexadecimal (same as opentelemetry) ++ decimal ++ datadog ++ uuid (this has dashes) + +Add support for logging to output using `TraceIdFormat` + +```yaml + telemetry: + exporters: + logging: + stdout: + format: + json: + disaplay_trace_id: (true|false|open_telemetry|hexadecimal|decimal|datadog|uuid) +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5735 \ No newline at end of file diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 6e925407a6..9b6dabdb82 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -2114,6 +2114,17 @@ expression: "&schema" ], "type": "string" }, + "DisplayTraceIdFormat": { + "anyOf": [ + { + "$ref": "#/definitions/TraceIdFormat", + "description": "#/definitions/TraceIdFormat" + }, + { + "type": "boolean" + } + ] + }, "Enabled": { "enum": [ "enabled" @@ -2527,8 +2538,8 @@ expression: "&schema" "type": "boolean" }, "format": { - "$ref": "#/definitions/TraceIdFormat2", - "description": "#/definitions/TraceIdFormat2" + "$ref": "#/definitions/TraceIdFormat", + "description": "#/definitions/TraceIdFormat" }, "header_name": { "description": "Choose the header name to expose trace_id (default: apollo-trace-id)", @@ -7146,27 +7157,16 @@ expression: "&schema" "TraceIdFormat": { "oneOf": [ { - "description": "Open Telemetry trace ID, a hex string.", + "description": "Format the Trace ID as a hexadecimal number\n\n(e.g. 
Trace ID 16 -> 00000000000000000000000000000010)", "enum": [ - "open_telemetry" + "hexadecimal" ], "type": "string" }, - { - "description": "Datadog trace ID, a u64.", - "enum": [ - "datadog" - ], - "type": "string" - } - ] - }, - "TraceIdFormat2": { - "oneOf": [ { "description": "Format the Trace ID as a hexadecimal number\n\n(e.g. Trace ID 16 -> 00000000000000000000000000000010)", "enum": [ - "hexadecimal" + "open_telemetry" ], "type": "string" }, @@ -7183,6 +7183,13 @@ expression: "&schema" "datadog" ], "type": "string" + }, + { + "description": "UUID format with dashes (eg. 67e55044-10b1-426f-9247-bb680e5fe0c8)", + "enum": [ + "uuid" + ], + "type": "string" } ] }, @@ -8131,9 +8138,8 @@ expression: "&schema" "type": "boolean" }, "display_trace_id": { - "default": true, - "description": "Include the trace id (if any) with the log event. (default: true)", - "type": "boolean" + "$ref": "#/definitions/DisplayTraceIdFormat", + "description": "#/definitions/DisplayTraceIdFormat" } }, "type": "object" @@ -8229,9 +8235,8 @@ expression: "&schema" "type": "boolean" }, "display_trace_id": { - "default": false, - "description": "Include the trace id (if any) with the log event. 
(default: false)", - "type": "boolean" + "$ref": "#/definitions/DisplayTraceIdFormat", + "description": "#/definitions/DisplayTraceIdFormat" } }, "type": "object" diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 1f36eb4c34..59eee4939a 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -232,14 +232,18 @@ pub(crate) struct ExposeTraceId { pub(crate) format: TraceIdFormat, } -#[derive(Clone, Default, Debug, Deserialize, JsonSchema)] -#[serde(deny_unknown_fields, rename_all = "lowercase")] +#[derive(Clone, Default, Debug, Deserialize, JsonSchema, PartialEq, Eq)] +#[serde(deny_unknown_fields, rename_all = "snake_case")] pub(crate) enum TraceIdFormat { /// Format the Trace ID as a hexadecimal number /// /// (e.g. Trace ID 16 -> 00000000000000000000000000000010) #[default] Hexadecimal, + /// Format the Trace ID as a hexadecimal number + /// + /// (e.g. Trace ID 16 -> 00000000000000000000000000000010) + OpenTelemetry, /// Format the Trace ID as a decimal number /// /// (e.g. Trace ID 16 -> 16) @@ -247,6 +251,23 @@ pub(crate) enum TraceIdFormat { /// Datadog Datadog, + + /// UUID format with dashes + /// (eg. 
67e55044-10b1-426f-9247-bb680e5fe0c8) + Uuid, +} + +impl TraceIdFormat { + pub(crate) fn format(&self, trace_id: TraceId) -> String { + match self { + TraceIdFormat::Hexadecimal | TraceIdFormat::OpenTelemetry => { + format!("{:032x}", trace_id) + } + TraceIdFormat::Decimal => format!("{}", u128::from_be_bytes(trace_id.to_bytes())), + TraceIdFormat::Datadog => trace_id.to_datadog(), + TraceIdFormat::Uuid => Uuid::from_bytes(trace_id.to_bytes()).to_string(), + } + } } /// Apollo usage report signature normalization algorithm diff --git a/apollo-router/src/plugins/telemetry/config_new/logging.rs b/apollo-router/src/plugins/telemetry/config_new/logging.rs index 0439142c5e..be9aeefdb4 100644 --- a/apollo-router/src/plugins/telemetry/config_new/logging.rs +++ b/apollo-router/src/plugins/telemetry/config_new/logging.rs @@ -18,6 +18,7 @@ use serde::Deserializer; use crate::configuration::ConfigurationError; use crate::plugins::telemetry::config::AttributeValue; +use crate::plugins::telemetry::config::TraceIdFormat; use crate::plugins::telemetry::config_new::experimental_when_header::HeaderLoggingCondition; use crate::plugins::telemetry::resource::ConfigResource; use crate::services::SupergraphRequest; @@ -335,11 +336,44 @@ pub(crate) struct JsonFormat { /// Include the resource with the log event. (default: true) pub(crate) display_resource: bool, /// Include the trace id (if any) with the log event. (default: true) - pub(crate) display_trace_id: bool, + pub(crate) display_trace_id: DisplayTraceIdFormat, /// Include the span id (if any) with the log event. (default: true) pub(crate) display_span_id: bool, } +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq)] +#[serde(deny_unknown_fields, rename_all = "snake_case", untagged)] +pub(crate) enum DisplayTraceIdFormat { + // /// Format the Trace ID as a hexadecimal number + // /// + // /// (e.g. 
Trace ID 16 -> 00000000000000000000000000000010) + // #[default] + // Hexadecimal, + // /// Format the Trace ID as a hexadecimal number + // /// + // /// (e.g. Trace ID 16 -> 00000000000000000000000000000010) + // OpenTelemetry, + // /// Format the Trace ID as a decimal number + // /// + // /// (e.g. Trace ID 16 -> 16) + // Decimal, + + // /// Datadog + // Datadog, + + // /// UUID format with dashes + // /// (eg. 67e55044-10b1-426f-9247-bb680e5fe0c8) + // Uuid, + TraceIdFormat(TraceIdFormat), + Bool(bool), +} + +impl Default for DisplayTraceIdFormat { + fn default() -> Self { + Self::TraceIdFormat(TraceIdFormat::default()) + } +} + impl Default for JsonFormat { fn default() -> Self { JsonFormat { @@ -353,7 +387,7 @@ impl Default for JsonFormat { display_current_span: false, display_span_list: true, display_resource: true, - display_trace_id: true, + display_trace_id: DisplayTraceIdFormat::Bool(true), display_span_id: true, } } @@ -389,7 +423,7 @@ pub(crate) struct TextFormat { /// Include all of the containing span information with the log event. (default: true) pub(crate) display_span_list: bool, /// Include the trace id (if any) with the log event. (default: false) - pub(crate) display_trace_id: bool, + pub(crate) display_trace_id: DisplayTraceIdFormat, /// Include the span id (if any) with the log event. 
(default: false) pub(crate) display_span_id: bool, } @@ -410,7 +444,7 @@ impl Default for TextFormat { display_resource: false, display_current_span: true, display_span_list: true, - display_trace_id: false, + display_trace_id: DisplayTraceIdFormat::Bool(false), display_span_id: false, } } diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs index 9b373f8ac9..1216afe0f5 100644 --- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs @@ -16,13 +16,13 @@ use crate::plugins::cache::entity::CacheSubgraph; use crate::plugins::cache::metrics::CacheMetricContextKey; use crate::plugins::demand_control::CostContext; use crate::plugins::telemetry::config::AttributeValue; +use crate::plugins::telemetry::config::TraceIdFormat; use crate::plugins::telemetry::config_new::cost::CostValue; use crate::plugins::telemetry::config_new::get_baggage; use crate::plugins::telemetry::config_new::instruments::Event; use crate::plugins::telemetry::config_new::instruments::InstrumentValue; use crate::plugins::telemetry::config_new::instruments::Standard; use crate::plugins::telemetry::config_new::trace_id; -use crate::plugins::telemetry::config_new::DatadogId; use crate::plugins::telemetry::config_new::Selector; use crate::plugins::telemetry::config_new::ToOtelValue; use crate::query_planner::APOLLO_OPERATION_ID; @@ -33,15 +33,6 @@ use crate::services::FIRST_EVENT_CONTEXT_KEY; use crate::spec::operation_limits::OperationLimits; use crate::Context; -#[derive(Deserialize, JsonSchema, Clone, Debug, PartialEq)] -#[serde(deny_unknown_fields, rename_all = "snake_case")] -pub(crate) enum TraceIdFormat { - /// Open Telemetry trace ID, a hex string. - OpenTelemetry, - /// Datadog trace ID, a u64. 
- Datadog, -} - #[derive(Deserialize, JsonSchema, Clone, Debug, PartialEq)] #[serde(deny_unknown_fields, rename_all = "snake_case")] pub(crate) enum OperationName { @@ -681,13 +672,7 @@ impl Selector for RouterSelector { .map(opentelemetry::Value::from), RouterSelector::TraceId { trace_id: trace_id_format, - } => trace_id().map(|id| { - match trace_id_format { - TraceIdFormat::OpenTelemetry => id.to_string(), - TraceIdFormat::Datadog => id.to_datadog(), - } - .into() - }), + } => trace_id().map(|id| trace_id_format.format(id).into()), RouterSelector::Baggage { baggage, default, .. } => get_baggage(baggage).or_else(|| default.maybe_to_otel_value()), @@ -2378,7 +2363,7 @@ mod test { let subscriber = tracing_subscriber::registry().with(otel::layer()); subscriber::with_default(subscriber, || { let selector = RouterSelector::TraceId { - trace_id: TraceIdFormat::OpenTelemetry, + trace_id: TraceIdFormat::Hexadecimal, }; assert_eq!( selector.on_request( @@ -2427,6 +2412,36 @@ mod test { .unwrap(), opentelemetry::Value::String("42".into()) ); + + let selector = RouterSelector::TraceId { + trace_id: TraceIdFormat::Uuid, + }; + + assert_eq!( + selector + .on_request( + &crate::services::RouterRequest::fake_builder() + .build() + .unwrap(), + ) + .unwrap(), + opentelemetry::Value::String("00000000-0000-0000-0000-00000000002a".into()) + ); + + let selector = RouterSelector::TraceId { + trace_id: TraceIdFormat::Decimal, + }; + + assert_eq!( + selector + .on_request( + &crate::services::RouterRequest::fake_builder() + .build() + .unwrap(), + ) + .unwrap(), + opentelemetry::Value::String("42".into()) + ); }); } diff --git a/apollo-router/src/plugins/telemetry/formatters/json.rs b/apollo-router/src/plugins/telemetry/formatters/json.rs index 8b0dd7fcf2..b7952c2701 100644 --- a/apollo-router/src/plugins/telemetry/formatters/json.rs +++ b/apollo-router/src/plugins/telemetry/formatters/json.rs @@ -21,6 +21,8 @@ use super::EventFormatter; use super::APOLLO_PRIVATE_PREFIX; use 
super::EXCLUDED_ATTRIBUTES; use crate::plugins::telemetry::config::AttributeValue; +use crate::plugins::telemetry::config::TraceIdFormat; +use crate::plugins::telemetry::config_new::logging::DisplayTraceIdFormat; use crate::plugins::telemetry::config_new::logging::JsonFormat; use crate::plugins::telemetry::dynamic_attribute::EventAttributes; use crate::plugins::telemetry::dynamic_attribute::LogAttributes; @@ -227,12 +229,29 @@ where if let Some(ref span) = current_span { if let Some((trace_id, span_id)) = get_trace_and_span_id(span) { - if self.config.display_trace_id { + let trace_id = match self.config.display_trace_id { + DisplayTraceIdFormat::Bool(true) + | DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Hexadecimal) + | DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::OpenTelemetry) => { + Some(TraceIdFormat::Hexadecimal.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Decimal) => { + Some(TraceIdFormat::Decimal.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Datadog) => { + Some(TraceIdFormat::Datadog.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Uuid) => { + Some(TraceIdFormat::Uuid.format(trace_id)) + } + DisplayTraceIdFormat::Bool(false) => None, + }; + if let Some(trace_id) = trace_id { serializer - .serialize_entry("trace_id", &trace_id.to_string()) + .serialize_entry("trace_id", &trace_id) .unwrap_or(()); } - if self.config.display_trace_id { + if self.config.display_span_id { serializer .serialize_entry("span_id", &span_id.to_string()) .unwrap_or(()); diff --git a/apollo-router/src/plugins/telemetry/formatters/text.rs b/apollo-router/src/plugins/telemetry/formatters/text.rs index 4ea440bacc..d809496964 100644 --- a/apollo-router/src/plugins/telemetry/formatters/text.rs +++ b/apollo-router/src/plugins/telemetry/formatters/text.rs @@ -27,6 +27,8 @@ use super::get_trace_and_span_id; use super::EventFormatter; use super::APOLLO_PRIVATE_PREFIX; use 
super::EXCLUDED_ATTRIBUTES; +use crate::plugins::telemetry::config::TraceIdFormat; +use crate::plugins::telemetry::config_new::logging::DisplayTraceIdFormat; use crate::plugins::telemetry::config_new::logging::TextFormat; use crate::plugins::telemetry::dynamic_attribute::EventAttributes; use crate::plugins::telemetry::dynamic_attribute::LogAttributes; @@ -324,7 +326,24 @@ where if let Some(ref span) = current_span { if let Some((trace_id, span_id)) = get_trace_and_span_id(span) { - if self.config.display_trace_id { + let trace_id = match self.config.display_trace_id { + DisplayTraceIdFormat::Bool(true) + | DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Hexadecimal) + | DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::OpenTelemetry) => { + Some(TraceIdFormat::Hexadecimal.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Decimal) => { + Some(TraceIdFormat::Decimal.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Datadog) => { + Some(TraceIdFormat::Datadog.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Uuid) => { + Some(TraceIdFormat::Uuid.format(trace_id)) + } + DisplayTraceIdFormat::Bool(false) => None, + }; + if let Some(trace_id) = trace_id { write!(writer, "trace_id: {} ", trace_id)?; } if self.config.display_span_id { diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 42b34cbfae..43c96bf6b1 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -56,6 +56,7 @@ use tokio::runtime::Handle; use tower::BoxError; use tower::ServiceBuilder; use tower::ServiceExt; +use uuid::Uuid; use self::apollo::ForwardValues; use self::apollo::LicensedOperationCountByType; @@ -594,9 +595,10 @@ impl Plugin for Telemetry { // Append the trace ID with the right format, based on the config let format_id = |trace_id: TraceId| { let id = match config.exporters.tracing.response_trace_id.format { - 
TraceIdFormat::Hexadecimal => format!("{:032x}", trace_id), + TraceIdFormat::Hexadecimal | TraceIdFormat::OpenTelemetry => format!("{:032x}", trace_id), TraceIdFormat::Decimal => format!("{}", u128::from_be_bytes(trace_id.to_bytes())), - TraceIdFormat::Datadog => trace_id.to_datadog() + TraceIdFormat::Datadog => trace_id.to_datadog(), + TraceIdFormat::Uuid => Uuid::from_bytes(trace_id.to_bytes()).to_string(), }; HeaderValue::from_str(&id).ok() diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index 6eddc3dc70..8cd6ffc1c0 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -540,7 +540,7 @@ impl IntegrationTest { async move { let span = info_span!("client_request"); let span_id = span.context().span().span_context().trace_id(); - + dbg!(&span_id); async move { let client = reqwest::Client::new(); @@ -558,6 +558,7 @@ impl IntegrationTest { .build() .unwrap(); telemetry.inject_context(&mut request); + dbg!(&request.headers()); request.headers_mut().remove(ACCEPT); match client.execute(request).await { Ok(response) => (span_id, response), diff --git a/apollo-router/tests/integration/telemetry/fixtures/json.uuid.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/json.uuid.router.yaml new file mode 100644 index 0000000000..7b9a97af99 --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/json.uuid.router.yaml @@ -0,0 +1,30 @@ +telemetry: + instrumentation: + spans: + mode: spec_compliant + events: + router: + # Standard events + request: info + response: info + error: info + exporters: + tracing: + propagation: + trace_context: true + jaeger: true + jaeger: + enabled: true + batch_processor: + scheduled_delay: 100ms + agent: + endpoint: default + logging: + experimental_when_header: + - name: content-type + value: "application/json" + body: true + stdout: + format: + json: + display_trace_id: uuid diff --git a/apollo-router/tests/integration/telemetry/fixtures/text.uuid.router.yaml 
b/apollo-router/tests/integration/telemetry/fixtures/text.uuid.router.yaml new file mode 100644 index 0000000000..13b6084b49 --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/text.uuid.router.yaml @@ -0,0 +1,31 @@ +telemetry: + instrumentation: + spans: + mode: spec_compliant + events: + router: + # Standard events + request: info + response: info + error: info + exporters: + tracing: + propagation: + trace_context: true + jaeger: true + jaeger: + enabled: true + batch_processor: + scheduled_delay: 100ms + agent: + endpoint: default + logging: + experimental_when_header: + - name: content-type + value: "application/json" + body: true + stdout: + format: + text: + display_trace_id: uuid + display_span_id: true diff --git a/apollo-router/tests/integration/telemetry/logging.rs b/apollo-router/tests/integration/telemetry/logging.rs index c0d8998f51..74cecef1c5 100644 --- a/apollo-router/tests/integration/telemetry/logging.rs +++ b/apollo-router/tests/integration/telemetry/logging.rs @@ -1,5 +1,6 @@ use serde_json::json; use tower::BoxError; +use uuid::Uuid; use crate::integration::common::graph_os_enabled; use crate::integration::common::IntegrationTest; @@ -8,6 +9,7 @@ use crate::integration::common::Telemetry; #[tokio::test(flavor = "multi_thread")] async fn test_json() -> Result<(), BoxError> { if !graph_os_enabled() { + eprintln!("test skipped"); return Ok(()); } @@ -34,6 +36,66 @@ async fn test_json() -> Result<(), BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn test_json_uuid_format() -> Result<(), BoxError> { + if !graph_os_enabled() { + eprintln!("test skipped"); + return Ok(()); + } + + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Jaeger) + .config(include_str!("fixtures/json.uuid.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + + let query = json!({"query":"query ExampleQuery {topProducts{name}}","variables":{}}); + 
router.execute_query(&query).await; + router.assert_log_contains("trace_id").await; + let (trace_id, _) = router.execute_query(&query).await; + router + .assert_log_contains(&format!("{}", Uuid::from_bytes(trace_id.to_bytes()))) + .await; + router.execute_query(&query).await; + router.assert_log_contains("span_id").await; + router.graceful_shutdown().await; + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_text_uuid_format() -> Result<(), BoxError> { + if !graph_os_enabled() { + eprintln!("test skipped"); + return Ok(()); + } + + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Jaeger) + .config(include_str!("fixtures/text.uuid.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + + let query = json!({"query":"query ExampleQuery {topProducts{name}}","variables":{}}); + router.execute_query(&query).await; + router.assert_log_contains("trace_id").await; + let (trace_id, _) = router.execute_query(&query).await; + router + .assert_log_contains(&format!("{}", Uuid::from_bytes(trace_id.to_bytes()))) + .await; + router.execute_query(&query).await; + router.assert_log_contains("span_id").await; + router.graceful_shutdown().await; + + Ok(()) +} + #[tokio::test(flavor = "multi_thread")] async fn test_json_sampler_off() -> Result<(), BoxError> { if !graph_os_enabled() { @@ -66,6 +128,7 @@ async fn test_json_sampler_off() -> Result<(), BoxError> { #[tokio::test(flavor = "multi_thread")] async fn test_text() -> Result<(), BoxError> { if !graph_os_enabled() { + eprintln!("test skipped"); return Ok(()); } @@ -93,6 +156,7 @@ async fn test_text() -> Result<(), BoxError> { #[tokio::test(flavor = "multi_thread")] async fn test_text_sampler_off() -> Result<(), BoxError> { if !graph_os_enabled() { + eprintln!("test skipped"); return Ok(()); } From 7d6c1113449fbdb4eeb7390526acf54bb3412240 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Fri, 2 Aug 2024 02:26:18 +0200 Subject: [PATCH 029/108] 
fix(federation): pass on operation directives to subgraph queries (#5743) --- apollo-federation/src/operation/mod.rs | 155 +++++++++--------- apollo-federation/src/operation/optimize.rs | 2 +- .../src/query_plan/fetch_dependency_graph.rs | 61 ++++--- .../fetch_dependency_graph_processor.rs | 10 +- .../src/query_plan/query_planner.rs | 3 +- .../handles_operations_with_directives.rs | 28 ++-- 6 files changed, 140 insertions(+), 119 deletions(-) diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index c2441530a4..e6f4e688b7 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -3671,97 +3671,108 @@ impl RebasedFragments { // Collect used variables from operation types. -fn collect_variables_from_value<'selection>( - value: &'selection executable::Value, - variables: &mut HashSet<&'selection Name>, -) { - match value { - executable::Value::Variable(v) => { - variables.insert(v); - } - executable::Value::List(list) => { - for value in list { - collect_variables_from_value(value, variables); - } +pub(crate) struct VariableCollector<'s> { + variables: HashSet<&'s Name>, +} + +impl<'s> VariableCollector<'s> { + pub(crate) fn new() -> Self { + Self { + variables: Default::default(), } - executable::Value::Object(object) => { - for (_key, value) in object { - collect_variables_from_value(value, variables); + } + + fn visit_value(&mut self, value: &'s executable::Value) { + match value { + executable::Value::Variable(v) => { + self.variables.insert(v); + } + executable::Value::List(list) => { + for value in list { + self.visit_value(value); + } + } + executable::Value::Object(object) => { + for (_key, value) in object { + self.visit_value(value); + } } + _ => {} } - _ => {} } -} -fn collect_variables_from_directive<'selection>( - directive: &'selection executable::Directive, - variables: &mut HashSet<&'selection Name>, -) { - for arg in directive.arguments.iter() { - 
collect_variables_from_value(&arg.value, variables); + fn visit_directive(&mut self, directive: &'s executable::Directive) { + for arg in directive.arguments.iter() { + self.visit_value(&arg.value); + } } -} -impl Field { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - for arg in self.arguments.iter() { - collect_variables_from_value(&arg.value, variables); - } - for dir in self.directives.iter() { - collect_variables_from_directive(dir, variables); + pub(crate) fn visit_directive_list(&mut self, directives: &'s executable::DirectiveList) { + for dir in directives.iter() { + self.visit_directive(dir); } } -} -impl FieldSelection { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - self.field.collect_variables(variables); - if let Some(set) = &self.selection_set { - set.collect_variables(variables); + fn visit_field(&mut self, field: &'s Field) { + for arg in field.arguments.iter() { + self.visit_value(&arg.value); } + self.visit_directive_list(&field.directives); } -} -impl InlineFragment { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - for dir in self.directives.iter() { - collect_variables_from_directive(dir, variables); + fn visit_field_selection(&mut self, selection: &'s FieldSelection) { + self.visit_field(&selection.field); + if let Some(set) = &selection.selection_set { + self.visit_selection_set(set); } } -} -impl InlineFragmentSelection { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - self.inline_fragment.collect_variables(variables); - self.selection_set.collect_variables(variables); + fn visit_inline_fragment(&mut self, fragment: &'s InlineFragment) { + self.visit_directive_list(&fragment.directives); + } + + fn visit_inline_fragment_selection(&mut self, selection: &'s InlineFragmentSelection) { + 
self.visit_inline_fragment(&selection.inline_fragment); + self.visit_selection_set(&selection.selection_set); + } + + fn visit_fragment_spread(&mut self, fragment: &'s FragmentSpread) { + self.visit_directive_list(&fragment.directives); + self.visit_directive_list(&fragment.fragment_directives); } -} -impl FragmentSpread { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - for dir in self.directives.iter() { - collect_variables_from_directive(dir, variables); + fn visit_fragment_spread_selection(&mut self, selection: &'s FragmentSpreadSelection) { + self.visit_fragment_spread(&selection.spread); + self.visit_selection_set(&selection.selection_set); + } + + fn visit_selection(&mut self, selection: &'s Selection) { + match selection { + Selection::Field(field) => self.visit_field_selection(field), + Selection::InlineFragment(frag) => self.visit_inline_fragment_selection(frag), + Selection::FragmentSpread(frag) => self.visit_fragment_spread_selection(frag), } - for dir in self.fragment_directives.iter() { - collect_variables_from_directive(dir, variables); + } + + pub(crate) fn visit_selection_set(&mut self, selection_set: &'s SelectionSet) { + for selection in selection_set.iter() { + self.visit_selection(selection); } } -} -impl FragmentSpreadSelection { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - self.spread.collect_variables(variables); - self.selection_set.collect_variables(variables); + /// Consume the collector and return the collected names. 
+ pub(crate) fn into_inner(self) -> HashSet<&'s Name> { + self.variables } } -impl Selection { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - match self { - Selection::Field(field) => field.collect_variables(variables), - Selection::InlineFragment(frag) => frag.collect_variables(variables), - Selection::FragmentSpread(frag) => frag.collect_variables(variables), - } +impl Fragment { + /// Returns the variable names that are used by this fragment. + pub(crate) fn used_variables(&self) -> HashSet<&'_ Name> { + let mut collector = VariableCollector::new(); + collector.visit_directive_list(&self.directives); + collector.visit_selection_set(&self.selection_set); + collector.into_inner() } } @@ -3769,15 +3780,9 @@ impl SelectionSet { /// Returns the variable names that are used by this selection set, including through fragment /// spreads. pub(crate) fn used_variables(&self) -> HashSet<&'_ Name> { - let mut variables = HashSet::new(); - self.collect_variables(&mut variables); - variables - } - - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - for selection in self.selections.values() { - selection.collect_variables(variables); - } + let mut collector = VariableCollector::new(); + collector.visit_selection_set(self); + collector.into_inner() } } diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index 3f21309a10..a4f95f9bb9 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -926,7 +926,7 @@ impl SelectionSet { // over fragment reuse, and so we do not want to invest a lot of time into improving // fragment reuse. We do the simple, less-than-ideal thing. 
if let Some(variable_definitions) = &context.operation_variables { - let fragment_variables = candidate.selection_set.used_variables(); + let fragment_variables = candidate.used_variables(); if fragment_variables .difference(variable_definitions) .next() diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index 5fdfcb99d7..9eb638def7 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -14,6 +14,7 @@ use apollo_compiler::ast::Type; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; +use apollo_compiler::executable::DirectiveList; use apollo_compiler::executable::VariableDefinition; use apollo_compiler::name; use apollo_compiler::schema; @@ -43,6 +44,7 @@ use crate::operation::Selection; use crate::operation::SelectionId; use crate::operation::SelectionMap; use crate::operation::SelectionSet; +use crate::operation::VariableCollector; use crate::operation::TYPENAME_FIELD; use crate::query_graph::extract_subgraphs_from_supergraph::FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME; use crate::query_graph::extract_subgraphs_from_supergraph::FEDERATION_REPRESENTATIONS_VAR_NAME; @@ -2325,6 +2327,7 @@ impl FetchDependencyGraphNode { query_graph: &QueryGraph, handled_conditions: &Conditions, variable_definitions: &[Node], + operation_directives: &Arc, fragments: Option<&mut RebasedFragments>, operation_name: Option, ) -> Result, FederationError> { @@ -2346,11 +2349,18 @@ impl FetchDependencyGraphNode { .transpose()?; let subgraph_schema = query_graph.schema_by_source(&self.subgraph_name)?; - let variable_usages = { - let set = selection.used_variables(); - let mut list = set.into_iter().cloned().collect::>(); - list.sort(); - list + // Narrow down the variable definitions to only the ones used in the subgraph operation. 
+ let variable_definitions = { + let mut collector = VariableCollector::new(); + collector.visit_directive_list(operation_directives); + collector.visit_selection_set(&selection); + let used_variables = collector.into_inner(); + + variable_definitions + .iter() + .filter(|variable| used_variables.contains(&variable.name)) + .cloned() + .collect::>() }; let mut operation = if self.is_entity_fetch { @@ -2358,6 +2368,7 @@ impl FetchDependencyGraphNode { subgraph_schema, selection, variable_definitions, + operation_directives, &operation_name, )? } else { @@ -2366,6 +2377,7 @@ impl FetchDependencyGraphNode { self.root_kind, selection, variable_definitions, + operation_directives, &operation_name, )? }; @@ -2374,6 +2386,17 @@ impl FetchDependencyGraphNode { { operation.reuse_fragments(fragments)?; } + + let variable_usages = { + let mut list = operation + .variables + .iter() + .map(|variable| variable.name.clone()) + .collect::>(); + list.sort(); + list + }; + let operation_document = operation.try_into()?; let node = super::PlanNode::Fetch(Box::new(super::FetchNode { @@ -2535,19 +2558,11 @@ impl FetchDependencyGraphNode { fn operation_for_entities_fetch( subgraph_schema: &ValidFederationSchema, selection_set: SelectionSet, - all_variable_definitions: &[Node], + mut variable_definitions: Vec>, + operation_directives: &Arc, operation_name: &Option, ) -> Result { - let mut variable_definitions: Vec> = - Vec::with_capacity(all_variable_definitions.len() + 1); - variable_definitions.push(representations_variable_definition(subgraph_schema)?); - let used_variables = selection_set.used_variables(); - variable_definitions.extend( - all_variable_definitions - .iter() - .filter(|definition| used_variables.contains(&definition.name)) - .cloned(), - ); + variable_definitions.insert(0, representations_variable_definition(subgraph_schema)?); let query_type_name = subgraph_schema.schema().root_operation(OperationType::Query).ok_or_else(|| SingleFederationError::InvalidSubgraph { @@ 
-2611,7 +2626,7 @@ fn operation_for_entities_fetch( root_kind: SchemaRootDefinitionKind::Query, name: operation_name.clone(), variables: Arc::new(variable_definitions), - directives: Default::default(), + directives: Arc::clone(operation_directives), selection_set, named_fragments: Default::default(), }) @@ -2621,22 +2636,16 @@ fn operation_for_query_fetch( subgraph_schema: &ValidFederationSchema, root_kind: SchemaRootDefinitionKind, selection_set: SelectionSet, - variable_definitions: &[Node], + variable_definitions: Vec>, + operation_directives: &Arc, operation_name: &Option, ) -> Result { - let used_variables = selection_set.used_variables(); - let variable_definitions = variable_definitions - .iter() - .filter(|definition| used_variables.contains(&definition.name)) - .cloned() - .collect(); - Ok(Operation { schema: subgraph_schema.clone(), root_kind, name: operation_name.clone(), variables: Arc::new(variable_definitions), - directives: Default::default(), + directives: Arc::clone(operation_directives), selection_set, named_fragments: Default::default(), }) diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs index 4ee9b57da0..66132a4ce5 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs @@ -1,5 +1,7 @@ use std::collections::HashSet; +use std::sync::Arc; +use apollo_compiler::executable::DirectiveList; use apollo_compiler::executable::VariableDefinition; use apollo_compiler::Name; use apollo_compiler::Node; @@ -44,7 +46,8 @@ const FETCH_COST: QueryPlanCost = 1000.0; const PIPELINING_COST: QueryPlanCost = 100.0; pub(crate) struct FetchDependencyGraphToQueryPlanProcessor { - variable_definitions: Vec>, + variable_definitions: Arc>>, + operation_directives: Arc, fragments: Option, operation_name: Option, assigned_defer_labels: Option>, @@ -241,13 +244,15 @@ fn 
sequence_cost(values: impl IntoIterator) -> QueryPlanCo impl FetchDependencyGraphToQueryPlanProcessor { pub(crate) fn new( - variable_definitions: Vec>, + variable_definitions: Arc>>, + operation_directives: Arc, fragments: Option, operation_name: Option, assigned_defer_labels: Option>, ) -> Self { Self { variable_definitions, + operation_directives, fragments, operation_name, assigned_defer_labels, @@ -276,6 +281,7 @@ impl FetchDependencyGraphProcessor, DeferredDeferBlock> query_graph, handled_conditions, &self.variable_definitions, + &self.operation_directives, self.fragments.as_mut(), op_name, ) diff --git a/apollo-federation/src/query_plan/query_planner.rs b/apollo-federation/src/query_plan/query_planner.rs index 012dd65392..2a3cb21ef4 100644 --- a/apollo-federation/src/query_plan/query_planner.rs +++ b/apollo-federation/src/query_plan/query_planner.rs @@ -447,7 +447,8 @@ impl QueryPlanner { None }; let mut processor = FetchDependencyGraphToQueryPlanProcessor::new( - operation.variables.clone(), + normalized_operation.variables.clone(), + normalized_operation.directives.clone(), rebased_fragments, operation.name.clone(), assigned_defer_labels, diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs index 2f344104fc..7a79e150b2 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs @@ -39,8 +39,6 @@ const SUBGRAPH_B: &str = r#" "#; #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (missing directives on fetch operation) fn test_if_directives_at_the_operation_level_are_passed_down_to_subgraph_queries() { let planner = planner!( subgraphA: SUBGRAPH_A, @@ -134,8 +132,8 @@ fn 
test_if_directives_at_the_operation_level_are_passed_down_to_subgraph_queries insta::assert_snapshot!(b_fetch_nodes[0].operation_document, @r#" query Operation__subgraphB__1($representations: [_Any!]!) @operation { _entities(representations: $representations) { - ... on Foo { - baz @field + ... on T { + f1 @field } } } @@ -144,8 +142,8 @@ fn test_if_directives_at_the_operation_level_are_passed_down_to_subgraph_queries insta::assert_snapshot!(b_fetch_nodes[1].operation_document, @r#" query Operation__subgraphB__2($representations: [_Any!]!) @operation { _entities(representations: $representations) { - ... on T { - f1 @field + ... on Foo { + baz @field } } } @@ -153,8 +151,6 @@ fn test_if_directives_at_the_operation_level_are_passed_down_to_subgraph_queries } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (missing `mutation` keyword and operation name) fn test_if_directives_on_mutations_are_passed_down_to_subgraph_queries() { let planner = planner!( subgraphA: SUBGRAPH_A, @@ -173,7 +169,7 @@ fn test_if_directives_on_mutations_are_passed_down_to_subgraph_queries() { @r###" QueryPlan { Fetch(service: "subgraphA") { - mutation TestMutation__subgraphA__0 { + { updateFoo(bar: "something") @field { id @field bar @field @@ -198,8 +194,6 @@ fn test_if_directives_on_mutations_are_passed_down_to_subgraph_queries() { } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (missing directives on fetch query) fn test_if_directives_with_arguments_applied_on_queries_are_ok() { let planner = planner!( Subgraph1: r#" @@ -244,8 +238,6 @@ fn test_if_directives_with_arguments_applied_on_queries_are_ok() { } #[test] -#[should_panic(expected = r#"snapshot assertion"#)] -// TODO: investigate this failure fn subgraph_query_retains_the_query_variables_used_in_the_directives_applied_to_the_query() { let planner = planner!( Subgraph1: r#" @@ -267,7 +259,15 @@ fn 
subgraph_query_retains_the_query_variables_used_in_the_directives_applied_to_ test } "#, - @r#""# + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + test + } + }, + } + "### ); let fetch_nodes = find_fetch_nodes_for_subgraph("Subgraph1", &plan); From 357fd36b65dd7e9b6fbc5cbd614a4e6b268aa5c3 Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Fri, 2 Aug 2024 00:33:38 -0700 Subject: [PATCH 030/108] fix(federation): fixed a mismatch after merging PR #5743 (#5763) --- .../src/query_plan/fetch_dependency_graph.rs | 18 ++++++++---------- .../handles_operations_with_directives.rs | 3 +++ 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index 9eb638def7..1b6bedb55b 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -2362,6 +2362,14 @@ impl FetchDependencyGraphNode { .cloned() .collect::>() }; + let variable_usages = { + let mut list = variable_definitions + .iter() + .map(|var_def| var_def.name.clone()) + .collect::>(); + list.sort(); + list + }; let mut operation = if self.is_entity_fetch { operation_for_entities_fetch( @@ -2387,16 +2395,6 @@ impl FetchDependencyGraphNode { operation.reuse_fragments(fragments)?; } - let variable_usages = { - let mut list = operation - .variables - .iter() - .map(|variable| variable.name.clone()) - .collect::>(); - list.sort(); - list - }; - let operation_document = operation.try_into()?; let node = super::PlanNode::Fetch(Box::new(super::FetchNode { diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs index 7a79e150b2..23508b7d02 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs +++ 
b/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs @@ -148,6 +148,9 @@ fn test_if_directives_at_the_operation_level_are_passed_down_to_subgraph_queries } } "#); + // This checks a regression where the `variable_usages` included the `representations` variable. + assert_eq!(b_fetch_nodes[0].variable_usages.len(), 0); + assert_eq!(b_fetch_nodes[1].variable_usages.len(), 0); } #[test] From 3ae756454418d468e54c1df50d075c5a8080ce2a Mon Sep 17 00:00:00 2001 From: Taylor Ninesling Date: Mon, 5 Aug 2024 10:18:34 -0500 Subject: [PATCH 031/108] Add argument cost to type cost in demand control scoring algorithm (#5740) --- ...inesling_demand_control_score_arguments.md | 5 ++ .../fixtures/basic_input_object_query.graphql | 3 ++ .../fixtures/basic_schema.graphql | 37 +++++++++------ .../cost_calculator/static_cost.rs | 47 +++++++++++++++++++ 4 files changed, 78 insertions(+), 14 deletions(-) create mode 100644 .changesets/fix_tninesling_demand_control_score_arguments.md create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql diff --git a/.changesets/fix_tninesling_demand_control_score_arguments.md b/.changesets/fix_tninesling_demand_control_score_arguments.md new file mode 100644 index 0000000000..a0c9e2e5d2 --- /dev/null +++ b/.changesets/fix_tninesling_demand_control_score_arguments.md @@ -0,0 +1,5 @@ +### Add argument cost to type cost in demand control scoring algorithm ([PR #5740](https://github.com/apollographql/router/pull/5740)) + +When scoring operations in the demand control plugin, include field arguments in the type cost. 
+ +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5740 diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql new file mode 100644 index 0000000000..86a01356e7 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql @@ -0,0 +1,3 @@ +query BasicInputObjectQuery { + getScalarByObject(args: { inner: { id: 1 } }) +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql index 716b0b3a3d..d613012b0d 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql @@ -1,29 +1,38 @@ type Query { - getScalar(id: ID): String - anotherScalar: Int - object1: FirstObjectType - interfaceInstance1: MyInterface - someUnion: UnionOfObjectTypes - someObjects: [FirstObjectType] - intList: [Int] + getScalar(id: ID): String + getScalarByObject(args: OuterInput): String + anotherScalar: Int + object1: FirstObjectType + interfaceInstance1: MyInterface + someUnion: UnionOfObjectTypes + someObjects: [FirstObjectType] + intList: [Int] } type Mutation { - doSomething: Int + doSomething: Int } type FirstObjectType { - field1: Int - innerList: [SecondObjectType] + field1: Int + innerList: [SecondObjectType] } interface MyInterface { - field2: String + field2: String } type SecondObjectType implements MyInterface { - field1: Int - field2: String + field1: Int + field2: String } -union UnionOfObjectTypes = FirstObjectType | SecondObjectType \ No newline at end of file +union UnionOfObjectTypes = FirstObjectType | SecondObjectType + +input InnerInput { + id: ID +} + +input 
OuterInput { + inner: InnerInput +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs index 1156c6df7d..70af911927 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use apollo_compiler::ast::InputValueDefinition; use apollo_compiler::ast::NamedType; use apollo_compiler::executable::ExecutableDocument; use apollo_compiler::executable::Field; @@ -98,6 +99,10 @@ impl StaticCostCalculator { should_estimate_requires, )?; + for argument in &field.definition.arguments { + type_cost += Self::score_argument(argument, schema)?; + } + let mut requirements_cost = 0.0; if should_estimate_requires { // If the field is marked with `@requires`, the required selection may not be included @@ -129,6 +134,40 @@ impl StaticCostCalculator { Ok(cost) } + fn score_argument( + argument: &InputValueDefinition, + schema: &Valid, + ) -> Result { + if let Some(ty) = schema.types.get(argument.ty.inner_named_type().as_str()) { + match ty { + apollo_compiler::schema::ExtendedType::InputObject(inner_arguments) => { + let mut cost = 1.0; + for inner_argument in inner_arguments.fields.values() { + cost += Self::score_argument(inner_argument, schema)?; + } + Ok(cost) + } + + apollo_compiler::schema::ExtendedType::Scalar(_) + | apollo_compiler::schema::ExtendedType::Enum(_) => Ok(0.0), + + apollo_compiler::schema::ExtendedType::Object(_) + | apollo_compiler::schema::ExtendedType::Interface(_) + | apollo_compiler::schema::ExtendedType::Union(_) => { + Err(DemandControlError::QueryParseFailure( + format!("Argument {} has type {}, but objects, interfaces, and unions are disallowed in this position", argument.name, argument.ty.inner_named_type()) + )) + } + } + } else { + Err(DemandControlError::QueryParseFailure(format!( + "Argument {} was found in 
query, but its type ({}) was not found in the schema", + argument.name, + argument.ty.inner_named_type() + ))) + } + } + fn score_fragment_spread( &self, fragment_spread: &FragmentSpread, @@ -562,6 +601,14 @@ mod tests { assert_eq!(basic_estimated_cost(schema, query), 10100.0) } + #[test] + fn input_object_cost() { + let schema = include_str!("./fixtures/basic_schema.graphql"); + let query = include_str!("./fixtures/basic_input_object_query.graphql"); + + assert_eq!(basic_estimated_cost(schema, query), 2.0) + } + #[test] fn skip_directive_excludes_cost() { let schema = include_str!("./fixtures/basic_schema.graphql"); From 8b9b1a3abd046eda86eff05f602939ae85d77c69 Mon Sep 17 00:00:00 2001 From: Taylor Ninesling Date: Mon, 5 Aug 2024 10:26:42 -0500 Subject: [PATCH 032/108] Extract @cost and @listSize to query planner's subgraph schemas (#5707) --- .../src/link/cost_spec_definition.rs | 202 ++++++++++++++ .../src/link/federation_spec_definition.rs | 29 ++ apollo-federation/src/link/mod.rs | 1 + apollo-federation/src/link/spec.rs | 7 + apollo-federation/src/link/spec_definition.rs | 11 + .../extract_subgraphs_from_supergraph.rs | 199 +++++++++++++- apollo-federation/tests/extract_subgraphs.rs | 256 ++++++++++++++++++ ...tract_subgraphs__can_extract_subgraph.snap | 12 +- ...s__extracts_demand_control_directives.snap | 166 ++++++++++++ ...cts_renamed_demand_control_directives.snap | 166 ++++++++++++ 10 files changed, 1036 insertions(+), 13 deletions(-) create mode 100644 apollo-federation/src/link/cost_spec_definition.rs create mode 100644 apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_demand_control_directives.snap create mode 100644 apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_renamed_demand_control_directives.snap diff --git a/apollo-federation/src/link/cost_spec_definition.rs b/apollo-federation/src/link/cost_spec_definition.rs new file mode 100644 index 0000000000..38e1f94619 --- /dev/null +++ 
b/apollo-federation/src/link/cost_spec_definition.rs @@ -0,0 +1,202 @@ +use std::collections::HashMap; + +use apollo_compiler::ast::Argument; +use apollo_compiler::ast::Directive; +use apollo_compiler::name; +use apollo_compiler::schema::Component; +use apollo_compiler::schema::EnumType; +use apollo_compiler::schema::ObjectType; +use apollo_compiler::schema::ScalarType; +use apollo_compiler::Name; +use apollo_compiler::Node; +use lazy_static::lazy_static; + +use crate::error::FederationError; +use crate::link::spec::Identity; +use crate::link::spec::Url; +use crate::link::spec::Version; +use crate::link::spec_definition::SpecDefinition; +use crate::link::spec_definition::SpecDefinitions; +use crate::schema::position::EnumTypeDefinitionPosition; +use crate::schema::position::ObjectTypeDefinitionPosition; +use crate::schema::position::ScalarTypeDefinitionPosition; +use crate::schema::FederationSchema; + +pub(crate) const COST_DIRECTIVE_NAME_IN_SPEC: Name = name!("cost"); +pub(crate) const COST_DIRECTIVE_NAME_DEFAULT: Name = name!("federation__cost"); + +pub(crate) const LIST_SIZE_DIRECTIVE_NAME_IN_SPEC: Name = name!("listSize"); +pub(crate) const LIST_SIZE_DIRECTIVE_NAME_DEFAULT: Name = name!("federation__listSize"); + +#[derive(Clone)] +pub(crate) struct CostSpecDefinition { + url: Url, + minimum_federation_version: Option, +} + +macro_rules! 
propagate_demand_control_directives { + ($func_name:ident, $directives_ty:ty, $wrap_ty:expr) => { + pub(crate) fn $func_name( + &self, + subgraph_schema: &FederationSchema, + source: &$directives_ty, + dest: &mut $directives_ty, + original_directive_names: &HashMap, + ) -> Result<(), FederationError> { + let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); + if let Some(cost_directive) = source.get( + cost_directive_name + .unwrap_or(&COST_DIRECTIVE_NAME_IN_SPEC) + .as_str(), + ) { + dest.push($wrap_ty(self.cost_directive( + subgraph_schema, + cost_directive.arguments.clone(), + )?)); + } + + let list_size_directive_name = + original_directive_names.get(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC); + if let Some(list_size_directive) = source.get( + list_size_directive_name + .unwrap_or(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC) + .as_str(), + ) { + dest.push($wrap_ty(self.list_size_directive( + subgraph_schema, + list_size_directive.arguments.clone(), + )?)); + } + + Ok(()) + } + }; +} + +macro_rules! 
propagate_demand_control_directives_to_position { + ($func_name:ident, $source_ty:ty, $dest_ty:ty) => { + pub(crate) fn $func_name( + &self, + subgraph_schema: &mut FederationSchema, + source: &Node<$source_ty>, + dest: &$dest_ty, + original_directive_names: &HashMap, + ) -> Result<(), FederationError> { + let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); + if let Some(cost_directive) = source.directives.get( + cost_directive_name + .unwrap_or(&COST_DIRECTIVE_NAME_IN_SPEC) + .as_str(), + ) { + dest.insert_directive( + subgraph_schema, + Component::from( + self.cost_directive(subgraph_schema, cost_directive.arguments.clone())?, + ), + )?; + } + + let list_size_directive_name = + original_directive_names.get(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC); + if let Some(list_size_directive) = source.directives.get( + list_size_directive_name + .unwrap_or(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC) + .as_str(), + ) { + dest.insert_directive( + subgraph_schema, + Component::from(self.list_size_directive( + subgraph_schema, + list_size_directive.arguments.clone(), + )?), + )?; + } + + Ok(()) + } + }; +} + +impl CostSpecDefinition { + pub(crate) fn new(version: Version, minimum_federation_version: Option) -> Self { + Self { + url: Url { + identity: Identity::cost_identity(), + version, + }, + minimum_federation_version, + } + } + + pub(crate) fn cost_directive( + &self, + schema: &FederationSchema, + arguments: Vec>, + ) -> Result { + let name = self + .directive_name_in_schema(schema, &COST_DIRECTIVE_NAME_IN_SPEC)? + .unwrap_or(COST_DIRECTIVE_NAME_DEFAULT); + + Ok(Directive { name, arguments }) + } + + pub(crate) fn list_size_directive( + &self, + schema: &FederationSchema, + arguments: Vec>, + ) -> Result { + let name = self + .directive_name_in_schema(schema, &LIST_SIZE_DIRECTIVE_NAME_IN_SPEC)? 
+ .unwrap_or(LIST_SIZE_DIRECTIVE_NAME_DEFAULT); + + Ok(Directive { name, arguments }) + } + + propagate_demand_control_directives!( + propagate_demand_control_directives, + apollo_compiler::ast::DirectiveList, + Node::new + ); + propagate_demand_control_directives!( + propagate_demand_control_schema_directives, + apollo_compiler::schema::DirectiveList, + Component::from + ); + + propagate_demand_control_directives_to_position!( + propagate_demand_control_directives_for_enum, + EnumType, + EnumTypeDefinitionPosition + ); + propagate_demand_control_directives_to_position!( + propagate_demand_control_directives_for_object, + ObjectType, + ObjectTypeDefinitionPosition + ); + propagate_demand_control_directives_to_position!( + propagate_demand_control_directives_for_scalar, + ScalarType, + ScalarTypeDefinitionPosition + ); +} + +impl SpecDefinition for CostSpecDefinition { + fn url(&self) -> &Url { + &self.url + } + + fn minimum_federation_version(&self) -> Option<&Version> { + self.minimum_federation_version.as_ref() + } +} + +lazy_static! 
{ + pub(crate) static ref COST_VERSIONS: SpecDefinitions = { + let mut definitions = SpecDefinitions::new(Identity::cost_identity()); + definitions.add(CostSpecDefinition::new( + Version { major: 0, minor: 1 }, + Some(Version { major: 2, minor: 9 }), + )); + definitions + }; +} diff --git a/apollo-federation/src/link/federation_spec_definition.rs b/apollo-federation/src/link/federation_spec_definition.rs index b62fb7d762..67f181ec8b 100644 --- a/apollo-federation/src/link/federation_spec_definition.rs +++ b/apollo-federation/src/link/federation_spec_definition.rs @@ -13,6 +13,8 @@ use crate::error::FederationError; use crate::error::SingleFederationError; use crate::link::argument::directive_optional_boolean_argument; use crate::link::argument::directive_required_string_argument; +use crate::link::cost_spec_definition::CostSpecDefinition; +use crate::link::cost_spec_definition::COST_VERSIONS; use crate::link::spec::Identity; use crate::link::spec::Url; use crate::link::spec::Version; @@ -387,6 +389,17 @@ impl FederationSpecDefinition { arguments, }) } + + pub(crate) fn get_cost_spec_definition( + &self, + schema: &FederationSchema, + ) -> Option<&'static CostSpecDefinition> { + schema + .metadata() + .and_then(|metadata| metadata.for_identity(&Identity::cost_identity())) + .and_then(|link| COST_VERSIONS.find(&link.url.version)) + .or_else(|| COST_VERSIONS.find_for_federation_version(self.version())) + } } impl SpecDefinition for FederationSpecDefinition { @@ -426,6 +439,22 @@ lazy_static! 
{ major: 2, minor: 5, })); + definitions.add(FederationSpecDefinition::new(Version { + major: 2, + minor: 6, + })); + definitions.add(FederationSpecDefinition::new(Version { + major: 2, + minor: 7, + })); + definitions.add(FederationSpecDefinition::new(Version { + major: 2, + minor: 8, + })); + definitions.add(FederationSpecDefinition::new(Version { + major: 2, + minor: 9, + })); definitions }; } diff --git a/apollo-federation/src/link/mod.rs b/apollo-federation/src/link/mod.rs index 326eee308f..1fe442ab9f 100644 --- a/apollo-federation/src/link/mod.rs +++ b/apollo-federation/src/link/mod.rs @@ -22,6 +22,7 @@ use crate::link::spec::Identity; use crate::link::spec::Url; pub(crate) mod argument; +pub(crate) mod cost_spec_definition; pub mod database; pub(crate) mod federation_spec_definition; pub(crate) mod graphql_definition; diff --git a/apollo-federation/src/link/spec.rs b/apollo-federation/src/link/spec.rs index 25dd24c4b2..5c1386644b 100644 --- a/apollo-federation/src/link/spec.rs +++ b/apollo-federation/src/link/spec.rs @@ -88,6 +88,13 @@ impl Identity { name: name!("inaccessible"), } } + + pub fn cost_identity() -> Identity { + Identity { + domain: APOLLO_SPEC_DOMAIN.to_string(), + name: name!("cost"), + } + } } /// The version of a `@link` specification, in the form of a major and minor version numbers. 
diff --git a/apollo-federation/src/link/spec_definition.rs b/apollo-federation/src/link/spec_definition.rs index 5826f8f4d9..1fb084afe5 100644 --- a/apollo-federation/src/link/spec_definition.rs +++ b/apollo-federation/src/link/spec_definition.rs @@ -182,6 +182,17 @@ impl SpecDefinitions { self.definitions.get(requested) } + pub(crate) fn find_for_federation_version(&self, federation_version: &Version) -> Option<&T> { + for definition in self.definitions.values() { + if let Some(minimum_federation_version) = definition.minimum_federation_version() { + if minimum_federation_version >= federation_version { + return Some(definition); + } + } + } + None + } + pub(crate) fn versions(&self) -> Keys { self.definitions.keys() } diff --git a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs index ebbceade17..23b1b7fbe4 100644 --- a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs +++ b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::collections::HashMap; use std::fmt; use std::fmt::Write; use std::ops::Deref; @@ -40,6 +41,7 @@ use time::OffsetDateTime; use crate::error::FederationError; use crate::error::MultipleFederationErrors; use crate::error::SingleFederationError; +use crate::link::cost_spec_definition::CostSpecDefinition; use crate::link::federation_spec_definition::get_federation_spec_definition_from_subgraph; use crate::link::federation_spec_definition::FederationSpecDefinition; use crate::link::federation_spec_definition::FEDERATION_VERSIONS; @@ -49,6 +51,7 @@ use crate::link::join_spec_definition::TypeDirectiveArguments; use crate::link::spec::Identity; use crate::link::spec::Version; use crate::link::spec_definition::SpecDefinition; +use crate::link::Link; use crate::link::DEFAULT_LINK_NAME; use crate::schema::field_set::parse_field_set_without_normalization; use 
crate::schema::position::is_graphql_reserved_name; @@ -234,7 +237,7 @@ pub(crate) fn new_empty_fed_2_subgraph_schema() -> Result Result, } +fn get_original_directive_names( + supergraph_schema: &FederationSchema, +) -> Result, FederationError> { + let mut hm: HashMap = HashMap::new(); + for directive in &supergraph_schema.schema().schema_definition.directives { + if directive.name.as_str() == "link" { + if let Ok(link) = Link::from_directive_application(directive) { + for import in link.imports { + hm.insert(import.element.clone(), import.imported_name().clone()); + } + } + } + } + Ok(hm) +} + fn extract_subgraphs_from_fed_2_supergraph( supergraph_schema: &FederationSchema, subgraphs: &mut FederationSubgraphs, @@ -309,6 +332,8 @@ fn extract_subgraphs_from_fed_2_supergraph( join_spec_definition: &'static JoinSpecDefinition, filtered_types: &Vec, ) -> Result<(), FederationError> { + let original_directive_names = get_original_directive_names(supergraph_schema)?; + let TypeInfos { object_types, interface_types, @@ -322,6 +347,7 @@ fn extract_subgraphs_from_fed_2_supergraph( federation_spec_definitions, join_spec_definition, filtered_types, + &original_directive_names, )?; extract_object_type_content( @@ -331,6 +357,7 @@ fn extract_subgraphs_from_fed_2_supergraph( federation_spec_definitions, join_spec_definition, &object_types, + &original_directive_names, )?; extract_interface_type_content( supergraph_schema, @@ -339,6 +366,7 @@ fn extract_subgraphs_from_fed_2_supergraph( federation_spec_definitions, join_spec_definition, &interface_types, + &original_directive_names, )?; extract_union_type_content( supergraph_schema, @@ -351,15 +379,19 @@ fn extract_subgraphs_from_fed_2_supergraph( supergraph_schema, subgraphs, graph_enum_value_name_to_subgraph_name, + federation_spec_definitions, join_spec_definition, &enum_types, + &original_directive_names, )?; extract_input_object_type_content( supergraph_schema, subgraphs, graph_enum_value_name_to_subgraph_name, + 
federation_spec_definitions, join_spec_definition, &input_object_types, + &original_directive_names, )?; extract_join_directives( @@ -432,6 +464,7 @@ fn add_all_empty_subgraph_types( federation_spec_definitions: &IndexMap, join_spec_definition: &'static JoinSpecDefinition, filtered_types: &Vec, + original_directive_names: &HashMap, ) -> Result { let type_directive_definition = join_spec_definition.type_directive_definition(supergraph_schema)?; @@ -461,6 +494,13 @@ fn add_all_empty_subgraph_types( graph_enum_value_name_to_subgraph_name, &type_directive_application.graph, )?; + let federation_spec_definition = federation_spec_definitions + .get(&type_directive_application.graph) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec" + .to_owned(), + })?; + pos.pre_insert(&mut subgraph.schema)?; pos.insert( &mut subgraph.schema, @@ -470,6 +510,17 @@ fn add_all_empty_subgraph_types( directives: Default::default(), }), )?; + + if let Some(cost_spec_definition) = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema) + { + cost_spec_definition.propagate_demand_control_directives_for_scalar( + &mut subgraph.schema, + pos.get(supergraph_schema.schema())?, + pos, + original_directive_names, + )?; + } } None } @@ -715,6 +766,7 @@ fn extract_object_type_content( federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], + original_directive_names: &HashMap, ) -> Result<(), FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -764,6 +816,29 @@ fn extract_object_type_content( )?; } + for graph_enum_value in subgraph_info.keys() { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + graph_enum_value, + )?; + let federation_spec_definition = federation_spec_definitions + .get(graph_enum_value) + .ok_or_else(|| 
SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec".to_owned(), + })?; + if let Some(cost_spec_definition) = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema) + { + cost_spec_definition.propagate_demand_control_directives_for_object( + &mut subgraph.schema, + type_, + &pos, + original_directive_names, + )?; + } + } + for (field_name, field) in type_.fields.iter() { let field_pos = pos.field(field_name.clone()); let mut field_directive_applications = Vec::new(); @@ -787,6 +862,8 @@ fn extract_object_type_content( message: "Subgraph unexpectedly does not use federation spec" .to_owned(), })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); add_subgraph_field( field_pos.clone().into(), field, @@ -794,6 +871,8 @@ fn extract_object_type_content( federation_spec_definition, is_shareable, None, + cost_spec_definition, + original_directive_names, )?; } } else { @@ -824,6 +903,8 @@ fn extract_object_type_content( message: "Subgraph unexpectedly does not use federation spec" .to_owned(), })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); if !subgraph_info.contains_key(graph_enum_value) { return Err( SingleFederationError::InvalidFederationSupergraph { @@ -843,6 +924,8 @@ fn extract_object_type_content( federation_spec_definition, is_shareable, Some(field_directive_application), + cost_spec_definition, + original_directive_names, )?; } } @@ -859,6 +942,7 @@ fn extract_interface_type_content( federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], + original_directive_names: &HashMap, ) -> Result<(), FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -981,6 +1065,8 @@ fn extract_interface_type_content( message: "Subgraph unexpectedly does not use federation spec" 
.to_owned(), })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); add_subgraph_field( pos.field(field_name.clone()), field, @@ -988,6 +1074,8 @@ fn extract_interface_type_content( federation_spec_definition, false, None, + cost_spec_definition, + original_directive_names, )?; } } else { @@ -1011,6 +1099,8 @@ fn extract_interface_type_content( message: "Subgraph unexpectedly does not use federation spec" .to_owned(), })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); if !subgraph_info.contains_key(graph_enum_value) { return Err( SingleFederationError::InvalidFederationSupergraph { @@ -1030,6 +1120,8 @@ fn extract_interface_type_content( federation_spec_definition, false, Some(field_directive_application), + cost_spec_definition, + original_directive_names, )?; } } @@ -1135,8 +1227,10 @@ fn extract_enum_type_content( supergraph_schema: &FederationSchema, subgraphs: &mut FederationSubgraphs, graph_enum_value_name_to_subgraph_name: &IndexMap>, + federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], + original_directive_names: &HashMap, ) -> Result<(), FederationError> { // This was added in join 0.3, so it can genuinely be None. 
let enum_value_directive_definition = @@ -1152,6 +1246,29 @@ fn extract_enum_type_content( }; let type_ = pos.get(supergraph_schema.schema())?; + for graph_enum_value in subgraph_info.keys() { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + graph_enum_value, + )?; + let federation_spec_definition = federation_spec_definitions + .get(graph_enum_value) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec".to_owned(), + })?; + if let Some(cost_spec_definition) = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema) + { + cost_spec_definition.propagate_demand_control_directives_for_enum( + &mut subgraph.schema, + type_, + &pos, + original_directive_names, + )?; + } + } + for (value_name, value) in type_.values.iter() { let value_pos = pos.value(value_name.clone()); let mut enum_value_directive_applications = Vec::new(); @@ -1219,8 +1336,10 @@ fn extract_input_object_type_content( supergraph_schema: &FederationSchema, subgraphs: &mut FederationSubgraphs, graph_enum_value_name_to_subgraph_name: &IndexMap>, + federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], + original_directive_names: &HashMap, ) -> Result<(), FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -1252,7 +1371,22 @@ fn extract_input_object_type_content( graph_enum_value_name_to_subgraph_name, graph_enum_value, )?; - add_subgraph_input_field(input_field_pos.clone(), input_field, subgraph, None)?; + let federation_spec_definition = federation_spec_definitions + .get(graph_enum_value) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec" + .to_owned(), + })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); + 
add_subgraph_input_field( + input_field_pos.clone(), + input_field, + subgraph, + None, + cost_spec_definition, + original_directive_names, + )?; } } else { for field_directive_application in &field_directive_applications { @@ -1267,6 +1401,14 @@ fn extract_input_object_type_content( graph_enum_value_name_to_subgraph_name, graph_enum_value, )?; + let federation_spec_definition = federation_spec_definitions + .get(graph_enum_value) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec" + .to_owned(), + })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); if !subgraph_info.contains_key(graph_enum_value) { return Err( SingleFederationError::InvalidFederationSupergraph { @@ -1284,6 +1426,8 @@ fn extract_input_object_type_content( input_field, subgraph, Some(field_directive_application), + cost_spec_definition, + original_directive_names, )?; } } @@ -1293,6 +1437,7 @@ fn extract_input_object_type_content( Ok(()) } +#[allow(clippy::too_many_arguments)] fn add_subgraph_field( object_or_interface_field_definition_position: ObjectOrInterfaceFieldDefinitionPosition, field: &FieldDefinition, @@ -1300,6 +1445,8 @@ fn add_subgraph_field( federation_spec_definition: &'static FederationSpecDefinition, is_shareable: bool, field_directive_application: Option<&FieldDirectiveArguments>, + cost_spec_definition: Option<&'static CostSpecDefinition>, + original_directive_names: &HashMap, ) -> Result<(), FederationError> { let field_directive_application = field_directive_application.unwrap_or_else(|| &FieldDirectiveArguments { @@ -1327,15 +1474,25 @@ fn add_subgraph_field( }; for argument in &field.arguments { + let mut destination_argument = InputValueDefinition { + description: None, + name: argument.name.clone(), + ty: argument.ty.clone(), + default_value: argument.default_value.clone(), + directives: Default::default(), + }; + if let 
Some(cost_spec_definition) = cost_spec_definition { + cost_spec_definition.propagate_demand_control_directives( + &subgraph.schema, + &argument.directives, + &mut destination_argument.directives, + original_directive_names, + )?; + } + subgraph_field .arguments - .push(Node::new(InputValueDefinition { - description: None, - name: argument.name.clone(), - ty: argument.ty.clone(), - default_value: argument.default_value.clone(), - directives: Default::default(), - })) + .push(Node::new(destination_argument)) } if let Some(requires) = &field_directive_application.requires { subgraph_field.directives.push(Node::new( @@ -1377,6 +1534,15 @@ fn add_subgraph_field( )); } + if let Some(cost_spec_definition) = cost_spec_definition { + cost_spec_definition.propagate_demand_control_directives( + &subgraph.schema, + &field.directives, + &mut subgraph_field.directives, + original_directive_names, + )?; + } + match object_or_interface_field_definition_position { ObjectOrInterfaceFieldDefinitionPosition::Object(pos) => { pos.insert(&mut subgraph.schema, Component::from(subgraph_field))?; @@ -1394,6 +1560,8 @@ fn add_subgraph_input_field( input_field: &InputValueDefinition, subgraph: &mut FederationSubgraph, field_directive_application: Option<&FieldDirectiveArguments>, + cost_spec_definition: Option<&'static CostSpecDefinition>, + original_directive_names: &HashMap, ) -> Result<(), FederationError> { let field_directive_application = field_directive_application.unwrap_or_else(|| &FieldDirectiveArguments { @@ -1410,7 +1578,7 @@ fn add_subgraph_input_field( Some(t) => Node::new(decode_type(t)?), None => input_field.ty.clone(), }; - let subgraph_input_field = InputValueDefinition { + let mut subgraph_input_field = InputValueDefinition { description: None, name: input_object_field_definition_position.field_name.clone(), ty: subgraph_input_field_type, @@ -1418,6 +1586,15 @@ fn add_subgraph_input_field( directives: Default::default(), }; + if let Some(cost_spec_definition) = 
cost_spec_definition { + cost_spec_definition.propagate_demand_control_directives( + &subgraph.schema, + &input_field.directives, + &mut subgraph_input_field.directives, + original_directive_names, + )?; + } + input_object_field_definition_position .insert(&mut subgraph.schema, Component::from(subgraph_input_field))?; @@ -2963,6 +3140,6 @@ mod tests { .unwrap(); let subgraph = subgraphs.get("subgraph").unwrap(); - assert_snapshot!(subgraph.schema.schema().schema_definition.directives, @r###" @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.5") @link(url: "https://specs.apollo.dev/hello/v0.1", import: ["@hello"])"###); + assert_snapshot!(subgraph.schema.schema().schema_definition.directives, @r###" @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") @link(url: "https://specs.apollo.dev/hello/v0.1", import: ["@hello"])"###); } } diff --git a/apollo-federation/tests/extract_subgraphs.rs b/apollo-federation/tests/extract_subgraphs.rs index 51a505345f..a3316e895d 100644 --- a/apollo-federation/tests/extract_subgraphs.rs +++ b/apollo-federation/tests/extract_subgraphs.rs @@ -255,3 +255,259 @@ fn erase_empty_types_due_to_overridden_fields() { .schema(); assert!(!b.types.contains_key("User")); } + +#[test] +fn extracts_demand_control_directives() { + let subgraphs = Supergraph::new(r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@cost", "@listSize"]) + { + query: Query + } + + directive @cost(weight: Int!) 
on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + + directive @cost__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + enum AorB + @join__type(graph: SUBGRAPHWITHCOST) + @cost(weight: 15) + { + A @join__enumValue(graph: SUBGRAPHWITHCOST) + B @join__enumValue(graph: SUBGRAPHWITHCOST) + } + + scalar ExpensiveInt + @join__type(graph: SUBGRAPHWITHCOST) + @cost(weight: 30) + + type ExpensiveObject + @join__type(graph: SUBGRAPHWITHCOST) + @cost(weight: 40) + { + id: ID + } + + type HasInts + @join__type(graph: SUBGRAPHWITHLISTSIZE) + { + ints: [Int!] 
+ } + + input InputTypeWithCost + @join__type(graph: SUBGRAPHWITHCOST) + { + somethingWithCost: Int @cost(weight: 20) + } + + input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPHWITHCOST @join__graph(name: "subgraphWithCost", url: "") + SUBGRAPHWITHLISTSIZE @join__graph(name: "subgraphWithListSize", url: "") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + type Query + @join__type(graph: SUBGRAPHWITHCOST) + @join__type(graph: SUBGRAPHWITHLISTSIZE) + { + fieldWithCost: Int @join__field(graph: SUBGRAPHWITHCOST) @cost(weight: 5) + argWithCost(arg: Int @cost(weight: 10)): Int @join__field(graph: SUBGRAPHWITHCOST) + enumWithCost: AorB @join__field(graph: SUBGRAPHWITHCOST) + inputWithCost(someInput: InputTypeWithCost): Int @join__field(graph: SUBGRAPHWITHCOST) + scalarWithCost: ExpensiveInt @join__field(graph: SUBGRAPHWITHCOST) + objectWithCost: ExpensiveObject @join__field(graph: SUBGRAPHWITHCOST) + fieldWithListSize: [String!] 
@join__field(graph: SUBGRAPHWITHLISTSIZE) @listSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int!): HasInts @join__field(graph: SUBGRAPHWITHLISTSIZE) @listSize(slicingArguments: ["first"], sizedFields: ["ints"], requireOneSlicingArgument: true) + } + "#) + .expect("is supergraph") + .extract_subgraphs() + .expect("extracts subgraphs"); + + let mut snapshot = String::new(); + for (_name, subgraph) in subgraphs { + use std::fmt::Write; + + _ = writeln!( + &mut snapshot, + "{}\n---\n{}", + subgraph.name, + subgraph.schema.schema() + ); + } + insta::assert_snapshot!(snapshot); +} + +#[test] +fn extracts_renamed_demand_control_directives() { + let subgraphs = Supergraph::new(r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/cost/v0.1", import: [{name: "@cost", as: "@renamedCost"}, {name: "@listSize", as: "@renamedListSize"}]) + { + query: Query + } + + directive @cost__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! 
= false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @renamedCost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + + directive @renamedListSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + enum AorB + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 15) + { + A @join__enumValue(graph: SUBGRAPHWITHCOST) + B @join__enumValue(graph: SUBGRAPHWITHCOST) + } + + scalar ExpensiveInt + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 30) + + type ExpensiveObject + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 40) + { + id: ID + } + + type HasInts + @join__type(graph: SUBGRAPHWITHLISTSIZE) + { + ints: [Int!] + } + + input InputTypeWithCost + @join__type(graph: SUBGRAPHWITHCOST) + { + somethingWithCost: Int @renamedCost(weight: 20) + } + + input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPHWITHCOST @join__graph(name: "subgraphWithCost", url: "") + SUBGRAPHWITHLISTSIZE @join__graph(name: "subgraphWithListSize", url: "") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION + } + + type Query + @join__type(graph: SUBGRAPHWITHCOST) + @join__type(graph: SUBGRAPHWITHLISTSIZE) + { + fieldWithCost: Int @join__field(graph: SUBGRAPHWITHCOST) @renamedCost(weight: 5) + argWithCost(arg: Int @renamedCost(weight: 10)): Int @join__field(graph: SUBGRAPHWITHCOST) + enumWithCost: AorB @join__field(graph: SUBGRAPHWITHCOST) + inputWithCost(someInput: InputTypeWithCost): Int @join__field(graph: SUBGRAPHWITHCOST) + scalarWithCost: ExpensiveInt @join__field(graph: SUBGRAPHWITHCOST) + objectWithCost: ExpensiveObject @join__field(graph: SUBGRAPHWITHCOST) + fieldWithListSize: [String!] @join__field(graph: SUBGRAPHWITHLISTSIZE) @renamedListSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int!): HasInts @join__field(graph: SUBGRAPHWITHLISTSIZE) @renamedListSize(slicingArguments: ["first"], sizedFields: ["ints"], requireOneSlicingArgument: true) + } + "#) + .expect("parses") + .extract_subgraphs() + .expect("extracts"); + + let mut snapshot = String::new(); + for (_name, subgraph) in subgraphs { + use std::fmt::Write; + + _ = writeln!( + &mut snapshot, + "{}\n---\n{}", + subgraph.name, + subgraph.schema.schema() + ); + } + insta::assert_snapshot!(snapshot); +} diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__can_extract_subgraph.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__can_extract_subgraph.snap index f036c14999..324709bd56 100644 --- a/apollo-federation/tests/snapshots/main__extract_subgraphs__can_extract_subgraph.snap +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__can_extract_subgraph.snap @@ -8,7 +8,7 @@ schema { query: Query } -extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.5") +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") directive @link(url: String, as: String, for: link__Purpose, import: 
[link__Import]) repeatable on SCHEMA @@ -38,6 +38,10 @@ directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + scalar link__Import enum link__Purpose { @@ -85,7 +89,7 @@ schema { query: Query } -extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.5") +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA @@ -115,6 +119,10 @@ directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM +directive @federation__cost(weight: Int!) 
on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + scalar link__Import enum link__Purpose { diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_demand_control_directives.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_demand_control_directives.snap new file mode 100644 index 0000000000..319b91d908 --- /dev/null +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_demand_control_directives.snap @@ -0,0 +1,166 @@ +--- +source: apollo-federation/tests/extract_subgraphs.rs +expression: snapshot +--- +subgraphWithCost +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +enum AorB @federation__cost(weight: 15) { + A + B +} + +scalar ExpensiveInt @federation__cost(weight: 30) + +type ExpensiveObject @federation__cost(weight: 40) { + id: ID +} + +input InputTypeWithCost { + somethingWithCost: Int @federation__cost(weight: 20) +} + +type Query { + fieldWithCost: Int @federation__cost(weight: 5) + argWithCost( + arg: Int @federation__cost(weight: 10), + ): Int + enumWithCost: AorB + inputWithCost(someInput: InputTypeWithCost): Int + scalarWithCost: ExpensiveInt + objectWithCost: ExpensiveObject + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} + +subgraphWithListSize +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type HasInts { + ints: [Int!] +} + +type Query { + fieldWithListSize: [String!] @federation__listSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int!): HasInts @federation__listSize(slicingArguments: ["first"], sizedFields: ["ints"], requireOneSlicingArgument: true) + _service: _Service! 
+} + +scalar _Any + +type _Service { + sdl: String +} diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_renamed_demand_control_directives.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_renamed_demand_control_directives.snap new file mode 100644 index 0000000000..319b91d908 --- /dev/null +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_renamed_demand_control_directives.snap @@ -0,0 +1,166 @@ +--- +source: apollo-federation/tests/extract_subgraphs.rs +expression: snapshot +--- +subgraphWithCost +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +enum AorB @federation__cost(weight: 15) { + A + B +} + +scalar ExpensiveInt @federation__cost(weight: 30) + +type ExpensiveObject @federation__cost(weight: 40) { + id: ID +} + +input InputTypeWithCost { + somethingWithCost: Int @federation__cost(weight: 20) +} + +type Query { + fieldWithCost: Int @federation__cost(weight: 5) + argWithCost( + arg: Int @federation__cost(weight: 10), + ): Int + enumWithCost: AorB + inputWithCost(someInput: InputTypeWithCost): Int + scalarWithCost: ExpensiveInt + objectWithCost: ExpensiveObject + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} + +subgraphWithListSize +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type HasInts { + ints: [Int!] +} + +type Query { + fieldWithListSize: [String!] @federation__listSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int!): HasInts @federation__listSize(slicingArguments: ["first"], sizedFields: ["ints"], requireOneSlicingArgument: true) + _service: _Service! 
+}
+
+scalar _Any
+
+type _Service {
+  sdl: String
+}

From c30de6e223d9a92d47ac2fff53f2eca821105a3b Mon Sep 17 00:00:00 2001
From: Simon Sapin
Date: Mon, 5 Aug 2024 17:28:02 +0200
Subject: [PATCH 033/108] Share a single Rust QP instance when using a pool of
 JS workers (#5771)

Config `supergraph.query_planning.experimental_parallelism` defaults to 1, but when set to something else `BridgeQueryPlannerPool` creates that many instances of `BridgeQueryPlanner` which in turn create a JS worker each.

If config `experimental_query_planner_mode` is also set to `new` or `both` (current default is `legacy`), then each `BridgeQueryPlanner` would also unnecessarily create its own instance of the Rust `QueryPlanner` struct.

Instead, a single `QueryPlanner` is now shared with `Arc`. Unlike JS workers, `QueryPlanner` only contains read-only data and can safely be shared between threads.

This should reduce memory use when both `supergraph.query_planning.experimental_parallelism` and `experimental_query_planner_mode` are configured to non-default values.
--- .../cost_calculator/static_cost.rs | 2 +- apollo-router/src/plugins/test.rs | 9 +++-- .../src/query_planner/bridge_query_planner.rs | 38 ++++++++++++++----- .../bridge_query_planner_pool.rs | 18 +++++---- 4 files changed, 46 insertions(+), 21 deletions(-) diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs index 70af911927..7601ba71e5 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs @@ -503,7 +503,7 @@ mod tests { let config: Arc = Arc::new(Default::default()); let (schema, query) = parse_schema_and_operation(schema_str, query_str, &config); - let mut planner = BridgeQueryPlanner::new(schema.into(), config.clone(), None) + let mut planner = BridgeQueryPlanner::new(schema.into(), config.clone(), None, None) .await .unwrap(); diff --git a/apollo-router/src/plugins/test.rs b/apollo-router/src/plugins/test.rs index 523e07f253..700e1ce921 100644 --- a/apollo-router/src/plugins/test.rs +++ b/apollo-router/src/plugins/test.rs @@ -13,6 +13,7 @@ use crate::plugin::DynPlugin; use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::query_planner::BridgeQueryPlanner; +use crate::query_planner::PlannerMode; use crate::services::execution; use crate::services::http; use crate::services::router; @@ -92,9 +93,11 @@ impl PluginTestHarness { let schema = Schema::parse(schema, &config).unwrap(); let sdl = schema.raw_sdl.clone(); let supergraph = schema.supergraph_schema().clone(); - let planner = BridgeQueryPlanner::new(schema.into(), Arc::new(config), None) - .await - .unwrap(); + let rust_planner = PlannerMode::maybe_rust(&schema, &config).unwrap(); + let planner = + BridgeQueryPlanner::new(schema.into(), Arc::new(config), None, rust_planner) + .await + .unwrap(); (sdl, supergraph, planner.subgraph_schemas()) } else { ( diff --git 
a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index f2348ef11b..d4fcb1331c 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -81,7 +81,7 @@ pub(crate) struct BridgeQueryPlanner { } #[derive(Clone)] -enum PlannerMode { +pub(crate) enum PlannerMode { Js(Arc>), Both { js: Arc>, @@ -115,6 +115,7 @@ impl PlannerMode { schema: &Schema, configuration: &Configuration, old_planner: Option>>, + rust_planner: Option>, ) -> Result { Ok(match configuration.experimental_query_planner_mode { QueryPlannerMode::New => Self::Rust { @@ -124,18 +125,33 @@ impl PlannerMode { old_planner, ) .await?, - rust: Self::rust(schema, configuration)?, + rust: rust_planner + .expect("expected Rust QP instance for `experimental_query_planner_mode: new`"), }, QueryPlannerMode::Legacy => { Self::Js(Self::js(&schema.raw_sdl, configuration, old_planner).await?) } QueryPlannerMode::Both => Self::Both { js: Self::js(&schema.raw_sdl, configuration, old_planner).await?, - rust: Self::rust(schema, configuration)?, + rust: rust_planner.expect( + "expected Rust QP instance for `experimental_query_planner_mode: both`", + ), }, }) } + pub(crate) fn maybe_rust( + schema: &Schema, + configuration: &Configuration, + ) -> Result>, ServiceBuildError> { + match configuration.experimental_query_planner_mode { + QueryPlannerMode::Legacy => Ok(None), + QueryPlannerMode::New | QueryPlannerMode::Both => { + Ok(Some(Self::rust(schema, configuration)?)) + } + } + } + fn rust( schema: &Schema, configuration: &Configuration, @@ -325,9 +341,11 @@ impl BridgeQueryPlanner { pub(crate) async fn new( schema: Arc, configuration: Arc, - old_planner: Option>>, + old_js_planner: Option>>, + rust_planner: Option>, ) -> Result { - let planner = PlannerMode::new(&schema, &configuration, old_planner).await?; + let planner = + PlannerMode::new(&schema, &configuration, old_js_planner, 
rust_planner).await?; let subgraph_schemas = Arc::new(planner.subgraphs().await?); @@ -991,7 +1009,7 @@ mod tests { let sdl = include_str!("../testdata/minimal_supergraph.graphql"); let config = Arc::default(); let schema = Schema::parse(sdl, &config).unwrap(); - let _planner = BridgeQueryPlanner::new(schema.into(), config, None) + let _planner = BridgeQueryPlanner::new(schema.into(), config, None, None) .await .unwrap(); @@ -1008,7 +1026,7 @@ mod tests { let sdl = include_str!("../testdata/minimal_fed2_supergraph.graphql"); let config = Arc::default(); let schema = Schema::parse(sdl, &config).unwrap(); - let _planner = BridgeQueryPlanner::new(schema.into(), config, None) + let _planner = BridgeQueryPlanner::new(schema.into(), config, None, None) .await .unwrap(); @@ -1027,7 +1045,7 @@ mod tests { let schema = Arc::new(Schema::parse(EXAMPLE_SCHEMA, &Default::default()).unwrap()); let query = include_str!("testdata/unknown_introspection_query.graphql"); - let planner = BridgeQueryPlanner::new(schema.clone(), Default::default(), None) + let planner = BridgeQueryPlanner::new(schema.clone(), Default::default(), None, None) .await .unwrap(); @@ -1127,7 +1145,7 @@ mod tests { let configuration = Arc::new(configuration); let schema = Schema::parse(EXAMPLE_SCHEMA, &configuration).unwrap(); - let planner = BridgeQueryPlanner::new(schema.into(), configuration.clone(), None) + let planner = BridgeQueryPlanner::new(schema.into(), configuration.clone(), None, None) .await .unwrap(); @@ -1435,7 +1453,7 @@ mod tests { let configuration = Arc::new(configuration); let schema = Schema::parse(schema, &configuration).unwrap(); - let planner = BridgeQueryPlanner::new(schema.into(), configuration.clone(), None) + let planner = BridgeQueryPlanner::new(schema.into(), configuration.clone(), None, None) .await .unwrap(); diff --git a/apollo-router/src/query_planner/bridge_query_planner_pool.rs b/apollo-router/src/query_planner/bridge_query_planner_pool.rs index 19e80b539e..a306f19b6b 100644 
--- a/apollo-router/src/query_planner/bridge_query_planner_pool.rs +++ b/apollo-router/src/query_planner/bridge_query_planner_pool.rs @@ -19,6 +19,7 @@ use super::QueryPlanResult; use crate::error::QueryPlannerError; use crate::error::ServiceBuildError; use crate::metrics::meter_provider; +use crate::query_planner::PlannerMode; use crate::services::QueryPlannerRequest; use crate::services::QueryPlannerResponse; use crate::spec::Schema; @@ -28,7 +29,7 @@ static CHANNEL_SIZE: usize = 1_000; #[derive(Clone)] pub(crate) struct BridgeQueryPlannerPool { - planners: Vec>>, + js_planners: Vec>>, sender: Sender<( QueryPlannerRequest, oneshot::Sender>, @@ -48,11 +49,13 @@ impl BridgeQueryPlannerPool { } pub(crate) async fn new_from_planners( - old_planners: Vec>>, + old_js_planners: Vec>>, schema: Arc, configuration: Arc, size: NonZeroUsize, ) -> Result { + let rust_planner = PlannerMode::maybe_rust(&schema, &configuration)?; + let mut join_set = JoinSet::new(); let (sender, receiver) = bounded::<( @@ -60,15 +63,16 @@ impl BridgeQueryPlannerPool { oneshot::Sender>, )>(CHANNEL_SIZE); - let mut old_planners_iterator = old_planners.into_iter(); + let mut old_js_planners_iterator = old_js_planners.into_iter(); (0..size.into()).for_each(|_| { let schema = schema.clone(); let configuration = configuration.clone(); + let rust_planner = rust_planner.clone(); - let old_planner = old_planners_iterator.next(); + let old_planner = old_js_planners_iterator.next(); join_set.spawn(async move { - BridgeQueryPlanner::new(schema, configuration, old_planner).await + BridgeQueryPlanner::new(schema, configuration, old_planner, rust_planner).await }); }); @@ -122,7 +126,7 @@ impl BridgeQueryPlannerPool { .init(); Ok(Self { - planners, + js_planners: planners, sender, schema, subgraph_schemas, @@ -131,7 +135,7 @@ impl BridgeQueryPlannerPool { } pub(crate) fn planners(&self) -> Vec>> { - self.planners.clone() + self.js_planners.clone() } pub(crate) fn schema(&self) -> Arc { From 
1c469ccec82f12bb152022871b869c3f12f36358 Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Mon, 5 Aug 2024 08:51:22 -0700 Subject: [PATCH 034/108] fix(federation): fixed the `is_descendant_of` function (#5768) - Also, strengthened the cycle detection assertion in the add_parent function. --- .../src/query_plan/fetch_dependency_graph.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index 1b6bedb55b..d1360120c7 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -729,6 +729,7 @@ impl FetchDependencyGraph { /// Adds another node as a parent of `child`, /// meaning that this fetch should happen after the provided one. + /// Assumption: The parent node is not a descendant of the child. fn add_parent(&mut self, child_id: NodeIndex, parent_relation: ParentRelation) { let ParentRelation { parent_node_id, @@ -738,8 +739,8 @@ impl FetchDependencyGraph { return; } assert!( - !self.graph.contains_edge(child_id, parent_node_id), - "Node {parent_node_id:?} is a child of {child_id:?}: \ + !self.is_descendant_of(parent_node_id, child_id), + "Node {parent_node_id:?} is a descendant of {child_id:?}: \ adding it as parent would create a cycle" ); self.on_modification(); @@ -794,15 +795,7 @@ impl FetchDependencyGraph { } fn is_descendant_of(&self, node_id: NodeIndex, maybe_ancestor_id: NodeIndex) -> bool { - if node_id == maybe_ancestor_id { - return true; - } - for child_id in self.children_of(node_id) { - if self.is_descendant_of(child_id, maybe_ancestor_id) { - return true; - } - } - false + petgraph::algo::has_path_connecting(&self.graph, maybe_ancestor_id, node_id, None) } /// Returns whether `node_id` is both a child of `maybe_parent_id` but also if we can show that the From d7101286267f026a3ce975c32089111a86cc9917 Mon Sep 17 00:00:00 2001 
From: Geoffroy Couprie Date: Tue, 6 Aug 2024 14:15:07 +0200 Subject: [PATCH 035/108] Entity cache fix: update the cache key with private info on the first call (#5599) Co-authored-by: Coenen Benjamin --- .../fix_geal_test_private_info_caching.md | 5 + apollo-router/src/plugins/cache/entity.rs | 8 +- apollo-router/tests/common.rs | 34 +++-- .../enterprise/entity-cache/private/README.md | 3 + .../entity-cache/private/configuration.yaml | 21 +++ .../enterprise/entity-cache/private/plan.json | 125 ++++++++++++++++++ .../entity-cache/private/private.rhai | 21 +++ .../entity-cache/private/supergraph.graphql | 91 +++++++++++++ apollo-router/tests/samples_tests.rs | 9 +- 9 files changed, 304 insertions(+), 13 deletions(-) create mode 100644 .changesets/fix_geal_test_private_info_caching.md create mode 100644 apollo-router/tests/samples/enterprise/entity-cache/private/README.md create mode 100644 apollo-router/tests/samples/enterprise/entity-cache/private/configuration.yaml create mode 100644 apollo-router/tests/samples/enterprise/entity-cache/private/plan.json create mode 100644 apollo-router/tests/samples/enterprise/entity-cache/private/private.rhai create mode 100644 apollo-router/tests/samples/enterprise/entity-cache/private/supergraph.graphql diff --git a/.changesets/fix_geal_test_private_info_caching.md b/.changesets/fix_geal_test_private_info_caching.md new file mode 100644 index 0000000000..22a83ff8b2 --- /dev/null +++ b/.changesets/fix_geal_test_private_info_caching.md @@ -0,0 +1,5 @@ +### Entity cache fix: update the cache key with private info on the first call ([PR #5599](https://github.com/apollographql/router/pull/5599)) + +This adds a test for private information caching and fixes an issue where private data was stored at the wrong key, so it did not appear to be cached + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5599 \ No newline at end of file diff --git a/apollo-router/src/plugins/cache/entity.rs 
b/apollo-router/src/plugins/cache/entity.rs index 1bdb3c6614..443a60d9d8 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -590,11 +590,13 @@ impl InnerCacheService { // we did not know in advance that this was a query with a private scope, so we update the cache key if !is_known_private { self.private_queries.write().await.insert(query.to_string()); + + if let Some(s) = private_id.as_ref() { + root_cache_key = format!("{root_cache_key}:{s}"); + } } - if let Some(s) = private_id.as_ref() { - root_cache_key = format!("{root_cache_key}:{s}"); - } else { + if private_id.is_none() { // the response has a private scope but we don't have a way to differentiate users, so we do not store the response in cache return Ok(response); } diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index 8cd6ffc1c0..4265605971 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -491,6 +491,7 @@ impl IntegrationTest { self.execute_query_internal( &json!({"query":"query {topProducts{name}}","variables":{}}), None, + None, ) } @@ -499,34 +500,44 @@ impl IntegrationTest { &self, query: &Value, ) -> impl std::future::Future { - self.execute_query_internal(query, None) + self.execute_query_internal(query, None, None) } #[allow(dead_code)] pub fn execute_bad_query( &self, ) -> impl std::future::Future { - self.execute_query_internal(&json!({"garbage":{}}), None) + self.execute_query_internal(&json!({"garbage":{}}), None, None) } #[allow(dead_code)] pub fn execute_huge_query( &self, ) -> impl std::future::Future { - self.execute_query_internal(&json!({"query":"query {topProducts{name, name, name, name, name, name, name, name, name, name}}","variables":{}}), None) + self.execute_query_internal(&json!({"query":"query {topProducts{name, name, name, name, name, name, name, name, name, name}}","variables":{}}), None, None) } #[allow(dead_code)] pub fn execute_bad_content_type( &self, ) -> impl 
std::future::Future { - self.execute_query_internal(&json!({"garbage":{}}), Some("garbage")) + self.execute_query_internal(&json!({"garbage":{}}), Some("garbage"), None) + } + + #[allow(dead_code)] + pub fn execute_query_with_headers( + &self, + query: &Value, + headers: HashMap, + ) -> impl std::future::Future { + self.execute_query_internal(query, None, Some(headers)) } fn execute_query_internal( &self, query: &Value, content_type: Option<&'static str>, + headers: Option>, ) -> impl std::future::Future { assert!( self.router.is_some(), @@ -544,7 +555,7 @@ impl IntegrationTest { async move { let client = reqwest::Client::new(); - let mut request = client + let mut builder = client .post(url) .header( CONTENT_TYPE, @@ -553,10 +564,15 @@ impl IntegrationTest { .header("apollographql-client-name", "custom_name") .header("apollographql-client-version", "1.0") .header("x-my-header", "test") - .header("head", "test") - .json(&query) - .build() - .unwrap(); + .header("head", "test"); + + if let Some(headers) = headers { + for (name, value) in headers { + builder = builder.header(name, value); + } + } + + let mut request = builder.json(&query).build().unwrap(); telemetry.inject_context(&mut request); dbg!(&request.headers()); request.headers_mut().remove(ACCEPT); diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/README.md b/apollo-router/tests/samples/enterprise/entity-cache/private/README.md new file mode 100644 index 0000000000..5e9504f9bb --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/README.md @@ -0,0 +1,3 @@ +# Entity cache: private data caching + +This tests private data caching in the entity cache: diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/private/configuration.yaml new file mode 100644 index 0000000000..65dd9ebad1 --- /dev/null +++ 
b/apollo-router/tests/samples/enterprise/entity-cache/private/configuration.yaml @@ -0,0 +1,21 @@ +override_subgraph_url: + products: http://localhost:4005 +include_subgraph_errors: + all: true + +rhai: + scripts: "tests/samples/enterprise/entity-cache/private" + main: "private.rhai" + +preview_entity_cache: + enabled: true + redis: + urls: + ["redis://localhost:6379",] + subgraph: + all: + enabled: true + ttl: 10s + subgraphs: + accounts: + private_id: "user" \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json b/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json new file mode 100644 index 0000000000..b466291766 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json @@ -0,0 +1,125 @@ +{ + "enterprise": true, + "redis": true, + "actions": [ + { + "type": "Start", + "schema_path": "./supergraph.graphql", + "configuration_path": "./configuration.yaml", + "subgraphs": { + "accounts": { + "requests": [ + { + "request": { + "body": {"query":"query private__accounts__0{me{name}}"} + }, + "response": { + "headers": { + "Cache-Control": "private, max-age=10", + "Content-Type": "application/json" + }, + "body": {"data": { "me": { "name": "test" } } } + } + } + ] + } + } + }, + { + "type": "Request", + "request": { + "query": "query private { me { name } }" + }, + "headers": { + "x-user": "1" + }, + "expected_response": { + "data":{ + "me":{ + "name":"test" + } + } + } + }, + { + "type": "ReloadSubgraphs", + "subgraphs": { + "accounts": { + "requests": [ + { + "request": { + "body": {"query":"query private__accounts__0{me{name}}"} + }, + "response": { + "headers": { + "Cache-Control": "private, max-age=10", + "Content-Type": "application/json" + }, + "body": {"data": { "me": { "name": "test2" } } } + } + } + ] + } + } + }, + { + "type": "Request", + "request": { + "query": "query private { me { name } }" + }, + "headers": { + "x-user": "2" + }, + 
"expected_response": { + "data":{ + "me":{ + "name":"test2" + } + } + } + }, + { + "type": "ReloadSubgraphs", + "subgraphs": { + "accounts": { + "requests": [] + } + } + }, + { + "type": "Request", + "request": { + "query": "query private { me { name } }" + }, + "headers": { + "x-user": "1" + }, + "expected_response": { + "data":{ + "me":{ + "name":"test" + } + } + } + }, + { + "type": "Request", + "request": { + "query": "query private { me { name } }" + }, + "headers": { + "x-user": "2" + }, + "expected_response": { + "data":{ + "me":{ + "name":"test2" + } + } + } + }, + { + "type": "Stop" + } + ] +} \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/private.rhai b/apollo-router/tests/samples/enterprise/entity-cache/private/private.rhai new file mode 100644 index 0000000000..d45c355969 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/private.rhai @@ -0,0 +1,21 @@ +fn supergraph_service(service) { + const request_callback = Fn("process_request"); + service.map_request(request_callback); +} + +// This will convert all cookie pairs into headers. +// If you only wish to convert certain cookies, you +// can add logic to modify the processing. 
+fn process_request(request) { + +print(`headers: ${request.headers}`); + // Find our cookies + if "x-user" in request.headers { + let user = request.headers["x-user"]; + print(`found user {user}`); + + request.context["user"] = user; + } else { + print("no user found"); + } +} diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/private/supergraph.graphql new file mode 100644 index 0000000000..1196414b6f --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/supergraph.graphql @@ -0,0 +1,91 @@ + +schema + @core(feature: "https://specs.apollo.dev/core/v0.2"), + @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION) + @core(feature: "https://specs.apollo.dev/inaccessible/v0.1", for: SECURITY) +{ + query: Query + mutation: Mutation +} + +directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet) on FIELD_DEFINITION + +directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on OBJECT | INTERFACE + +directive @join__owner(graph: join__Graph!) on OBJECT | INTERFACE + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @tag(name: String!) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION + +directive @inaccessible on OBJECT | FIELD_DEFINITION | INTERFACE | UNION + +enum core__Purpose { + """ + `EXECUTION` features provide metadata necessary to for operation execution. + """ + EXECUTION + + """ + `SECURITY` features provide metadata necessary to securely resolve fields. 
+ """ + SECURITY +} + +scalar join__FieldSet + +enum join__Graph { + ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev") + REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") +} +type Mutation { + updateMyAccount: User @join__field(graph: ACCOUNTS) + createProduct(name: String, upc: ID!): Product @join__field(graph: PRODUCTS) + createReview(body: String, id: ID!, upc: ID!): Review @join__field(graph: REVIEWS) +} + +type Product + @join__owner(graph: PRODUCTS) + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: REVIEWS, key: "upc") +{ + inStock: Boolean @join__field(graph: INVENTORY) @tag(name: "private") @inaccessible + name: String @join__field(graph: PRODUCTS) + price: Int @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) + shippingEstimate: Int @join__field(graph: INVENTORY, requires: "price weight") + upc: String! @join__field(graph: PRODUCTS) + weight: Int @join__field(graph: PRODUCTS) +} + +type Query { + me: User @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__owner(graph: REVIEWS) + @join__type(graph: REVIEWS, key: "id") +{ + author: User @join__field(graph: REVIEWS, provides: "username") + body: String @join__field(graph: REVIEWS) + id: ID! @join__field(graph: REVIEWS) + product: Product @join__field(graph: REVIEWS) +} + +type User + @join__owner(graph: ACCOUNTS) + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") +{ + id: ID! 
@join__field(graph: ACCOUNTS) + name: String @join__field(graph: ACCOUNTS) + reviews: [Review] @join__field(graph: REVIEWS) + username: String @join__field(graph: ACCOUNTS) +} diff --git a/apollo-router/tests/samples_tests.rs b/apollo-router/tests/samples_tests.rs index b6f2f902f1..da324cfbd7 100644 --- a/apollo-router/tests/samples_tests.rs +++ b/apollo-router/tests/samples_tests.rs @@ -165,11 +165,13 @@ impl TestExecution { Action::Request { request, query_path, + headers, expected_response, } => { self.request( request.clone(), query_path.as_deref(), + headers, expected_response, path, out, @@ -410,6 +412,7 @@ impl TestExecution { &mut self, mut request: Value, query_path: Option<&str>, + headers: &HashMap, expected_response: &Value, path: &Path, out: &mut String, @@ -434,7 +437,9 @@ impl TestExecution { } writeln!(out, "query: {}\n", serde_json::to_string(&request).unwrap()).unwrap(); - let (_, response) = router.execute_query(&request).await; + let (_, response) = router + .execute_query_with_headers(&request, headers.clone()) + .await; let body = response.bytes().await.map_err(|e| { writeln!(out, "could not get graphql response data: {e}").unwrap(); let f: Failed = out.clone().into(); @@ -575,6 +580,8 @@ enum Action { Request { request: Value, query_path: Option, + #[serde(default)] + headers: HashMap, expected_response: Value, }, EndpointRequest { From e028eb5d75df95924e5081a734a63be8ec96f754 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Tue, 6 Aug 2024 16:23:46 +0200 Subject: [PATCH 036/108] chore(federation): remove `for_each_element` (#5778) --- apollo-federation/src/operation/mod.rs | 64 ----------------------- apollo-federation/src/schema/field_set.rs | 61 ++++++++++++--------- 2 files changed, 36 insertions(+), 89 deletions(-) diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index e6f4e688b7..a585cbd7a2 100644 --- a/apollo-federation/src/operation/mod.rs +++ 
b/apollo-federation/src/operation/mod.rs @@ -942,22 +942,6 @@ impl Selection { } } } - - pub(crate) fn for_each_element( - &self, - parent_type_position: CompositeTypeDefinitionPosition, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - match self { - Selection::Field(field_selection) => field_selection.for_each_element(callback), - Selection::InlineFragment(inline_fragment_selection) => { - inline_fragment_selection.for_each_element(callback) - } - Selection::FragmentSpread(fragment_spread_selection) => { - fragment_spread_selection.for_each_element(parent_type_position, callback) - } - } - } } impl From for Selection { @@ -1575,22 +1559,6 @@ impl FragmentSpreadSelection { } self.selection_set.any_element(predicate) } - - pub(crate) fn for_each_element( - &self, - parent_type_position: CompositeTypeDefinitionPosition, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - let inline_fragment = InlineFragment::new(InlineFragmentData { - schema: self.spread.schema.clone(), - parent_type_position, - type_condition_position: Some(self.spread.type_condition_position.clone()), - directives: self.spread.directives.clone(), - selection_id: self.spread.selection_id.clone(), - }); - callback(inline_fragment.into())?; - self.selection_set.for_each_element(callback) - } } impl FragmentSpreadData { @@ -2916,19 +2884,6 @@ impl SelectionSet { } Ok(false) } - - /// Runs the given callback for all elements in the selection set and their descendants. Note - /// that fragment spread selections are converted to inline fragment elements, and their - /// fragment selection sets are recursed into. - pub(crate) fn for_each_element( - &self, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - for selection in self.selections.values() { - selection.for_each_element(self.type_position.clone(), callback)? 
- } - Ok(()) - } } impl IntoIterator for SelectionSet { @@ -3244,17 +3199,6 @@ impl FieldSelection { } Ok(false) } - - pub(crate) fn for_each_element( - &self, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - callback(self.field.clone().into())?; - if let Some(selection_set) = &self.selection_set { - selection_set.for_each_element(callback)?; - } - Ok(()) - } } impl Field { @@ -3418,14 +3362,6 @@ impl InlineFragmentSelection { } self.selection_set.any_element(predicate) } - - pub(crate) fn for_each_element( - &self, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - callback(self.inline_fragment.clone().into())?; - self.selection_set.for_each_element(callback) - } } /// This uses internal copy-on-write optimization to make `Clone` cheap. diff --git a/apollo-federation/src/schema/field_set.rs b/apollo-federation/src/schema/field_set.rs index 077f33c7d5..6aee222a35 100644 --- a/apollo-federation/src/schema/field_set.rs +++ b/apollo-federation/src/schema/field_set.rs @@ -10,8 +10,8 @@ use crate::error::FederationError; use crate::error::MultipleFederationErrors; use crate::error::SingleFederationError; use crate::operation::NamedFragments; +use crate::operation::Selection; use crate::operation::SelectionSet; -use crate::query_graph::graph_path::OpPathElement; use crate::schema::position::CompositeTypeDefinitionPosition; use crate::schema::position::FieldDefinitionPosition; use crate::schema::position::InterfaceTypeDefinitionPosition; @@ -23,28 +23,39 @@ use crate::schema::ValidFederationSchema; // Federation spec does not allow the alias syntax in field set strings. // However, since `parse_field_set` uses the standard GraphQL parser, which allows aliases, // we need this secondary check to ensure that aliases are not used. 
-fn check_absence_of_aliases( - selection_set: &SelectionSet, - code_str: &str, -) -> Result<(), FederationError> { - let mut alias_errors = vec![]; - selection_set.for_each_element(&mut |elem| { - let OpPathElement::Field(field) = elem else { - return Ok(()); - }; - let Some(alias) = &field.alias else { - return Ok(()); - }; - alias_errors.push(SingleFederationError::UnsupportedFeature { - // PORT_NOTE: The JS version also quotes the directive name in the error message. - // For example, "aliases are not currently supported in @requires". - message: format!( - r#"Cannot use alias "{}" in "{}": aliases are not currently supported in the used directive"#, - alias, code_str) - }); +fn check_absence_of_aliases(selection_set: &SelectionSet) -> Result<(), FederationError> { + fn visit_selection_set( + errors: &mut MultipleFederationErrors, + selection_set: &SelectionSet, + ) -> Result<(), FederationError> { + for selection in selection_set.iter() { + match selection { + Selection::FragmentSpread(_) => { + return Err(FederationError::internal( + "check_absence_of_aliases(): unexpected fragment spread", + )) + } + Selection::InlineFragment(frag) => check_absence_of_aliases(&frag.selection_set)?, + Selection::Field(field) => { + if let Some(alias) = &field.field.alias { + errors.push(SingleFederationError::UnsupportedFeature { + // PORT_NOTE: The JS version also quotes the directive name in the error message. + // For example, "aliases are not currently supported in @requires". 
+ message: format!(r#"Cannot use alias "{alias}" in "{}": aliases are not currently supported in the used directive"#, field.field) + }.into()); + } + if let Some(selection_set) = &field.selection_set { + visit_selection_set(errors, selection_set)?; + } + } + } + } Ok(()) - })?; - MultipleFederationErrors::from_iter(alias_errors).into_result() + } + + let mut errors = MultipleFederationErrors { errors: vec![] }; + visit_selection_set(&mut errors, selection_set)?; + errors.into_result() } // TODO: In the JS codebase, this has some error-rewriting to help give the user better hints around @@ -69,7 +80,7 @@ pub(crate) fn parse_field_set( SelectionSet::from_selection_set(&field_set.selection_set, &named_fragments, schema)?; // Validate the field set has no aliases. - check_absence_of_aliases(&selection_set, value)?; + check_absence_of_aliases(&selection_set)?; Ok(selection_set) } @@ -238,8 +249,8 @@ mod tests { assert_eq!( err.to_string(), r#"The following errors occurred: - - Cannot use alias "r1" in "r1: r s q1: q": aliases are not currently supported in the used directive - - Cannot use alias "q1" in "r1: r s q1: q": aliases are not currently supported in the used directive"# + - Cannot use alias "r1" in "r1: r": aliases are not currently supported in the used directive + - Cannot use alias "q1" in "q1: q": aliases are not currently supported in the used directive"# ); Ok(()) } From cac27503615bcb1ec482f44ca45248115e8d96c9 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Wed, 7 Aug 2024 14:23:05 +0100 Subject: [PATCH 037/108] Add metrics for cache entry memory size (#5770) Co-authored-by: bryn Co-authored-by: Edward Huang --- .changesets/feat_enhanced_observability.md | 13 + .changesets/fix_missing_cache_gauge.md | 5 + apollo-router/src/cache/mod.rs | 6 +- apollo-router/src/cache/redis.rs | 7 + apollo-router/src/cache/size_estimation.rs | 438 ++++++++++++++++++ apollo-router/src/cache/storage.rs | 313 +++++++++++-- apollo-router/src/plugins/cache/entity.rs | 7 + 
.../file_uploads/rearrange_query_plan.rs | 1 + .../src/query_planner/bridge_query_planner.rs | 1 + .../query_planner/caching_query_planner.rs | 14 + apollo-router/src/query_planner/plan.rs | 30 ++ apollo-router/src/query_planner/tests.rs | 9 +- .../src/services/supergraph/service.rs | 1 + .../configuration/in-memory-caching.mdx | 13 +- .../instrumentation/standard-instruments.mdx | 1 + docs/source/containerization/kubernetes.mdx | 23 + 16 files changed, 846 insertions(+), 36 deletions(-) create mode 100644 .changesets/feat_enhanced_observability.md create mode 100644 .changesets/fix_missing_cache_gauge.md create mode 100644 apollo-router/src/cache/size_estimation.rs diff --git a/.changesets/feat_enhanced_observability.md b/.changesets/feat_enhanced_observability.md new file mode 100644 index 0000000000..703e0a6918 --- /dev/null +++ b/.changesets/feat_enhanced_observability.md @@ -0,0 +1,13 @@ +### New `apollo.router.cache.storage.estimated_size` gauge ([PR #5770](https://github.com/apollographql/router/pull/5770)) + +The router supports the new metric `apollo.router.cache.storage.estimated_size` that helps users understand and monitor the amount of memory that query planner cache entries consume. + +The `apollo.router.cache.storage.estimated_size` metric gives an estimated size in bytes of a cache entry. It has the following attributes: +- `kind`: `query planner`. +- `storage`: `memory`. + +Before using the estimate to decide whether to update the cache, users should validate that the estimate correlates with their pod's memory usage. + +To learn how to troubleshoot with this metric, see the [Pods terminating due to memory pressure](https://www.apollographql.com/docs/router/containerization/kubernetes#pods-terminating-due-to-memory-pressure) guide in docs. 
+ +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5770 \ No newline at end of file diff --git a/.changesets/fix_missing_cache_gauge.md b/.changesets/fix_missing_cache_gauge.md new file mode 100644 index 0000000000..1b71523210 --- /dev/null +++ b/.changesets/fix_missing_cache_gauge.md @@ -0,0 +1,5 @@ +### Fix missing `apollo_router_cache_size` metric ([PR #5770](https://github.com/apollographql/router/pull/5770)) + +Previously, if the in-memory cache wasn't mutated, the `apollo_router_cache_size` metric wouldn't be available. This has been fixed in this release. + +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5770 diff --git a/apollo-router/src/cache/mod.rs b/apollo-router/src/cache/mod.rs index 80daa1d8a0..6e1ef01cb8 100644 --- a/apollo-router/src/cache/mod.rs +++ b/apollo-router/src/cache/mod.rs @@ -14,7 +14,9 @@ use self::storage::ValueType; use crate::configuration::RedisCache; pub(crate) mod redis; +mod size_estimation; pub(crate) mod storage; +pub(crate) use size_estimation::estimate_size; type WaitMap = Arc>>>; pub(crate) const DEFAULT_CACHE_CAPACITY: NonZeroUsize = match NonZeroUsize::new(512) { @@ -37,7 +39,7 @@ where pub(crate) async fn with_capacity( capacity: NonZeroUsize, redis: Option, - caller: &str, + caller: &'static str, ) -> Result { Ok(Self { wait_map: Arc::new(Mutex::new(HashMap::new())), @@ -47,7 +49,7 @@ where pub(crate) async fn from_configuration( config: &crate::configuration::Cache, - caller: &str, + caller: &'static str, ) -> Result { Self::with_capacity(config.in_memory.limit, config.redis.clone(), caller).await } diff --git a/apollo-router/src/cache/redis.rs b/apollo-router/src/cache/redis.rs index f0973551f4..ae0697b3cf 100644 --- a/apollo-router/src/cache/redis.rs +++ b/apollo-router/src/cache/redis.rs @@ -593,12 +593,19 @@ mod test { use url::Url; + use crate::cache::storage::ValueType; + #[test] fn 
ensure_invalid_payload_serialization_doesnt_fail() { #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] struct Stuff { time: SystemTime, } + impl ValueType for Stuff { + fn estimated_size(&self) -> Option { + None + } + } let invalid_json_payload = super::RedisValue(Stuff { // this systemtime is invalid, serialization will fail diff --git a/apollo-router/src/cache/size_estimation.rs b/apollo-router/src/cache/size_estimation.rs new file mode 100644 index 0000000000..885e8d5c13 --- /dev/null +++ b/apollo-router/src/cache/size_estimation.rs @@ -0,0 +1,438 @@ +use std::fmt::Debug; +use std::fmt::Display; +use std::fmt::Formatter; + +use serde::ser; +use serde::ser::SerializeMap; +use serde::ser::SerializeSeq; +use serde::ser::SerializeStruct; +use serde::ser::SerializeStructVariant; +use serde::ser::SerializeTuple; +use serde::ser::SerializeTupleStruct; +use serde::ser::SerializeTupleVariant; +use serde::Serialize; + +pub(crate) fn estimate_size(s: &T) -> usize { + let ser = s + .serialize(CountingSerializer::default()) + .expect("mut be able to serialize"); + ser.count +} + +pub(crate) struct Error; + +impl Debug for Error { + fn fmt(&self, _f: &mut Formatter<'_>) -> std::fmt::Result { + unreachable!() + } +} + +impl Display for Error { + fn fmt(&self, _f: &mut Formatter<'_>) -> std::fmt::Result { + unreachable!() + } +} + +impl std::error::Error for Error {} + +impl ser::Error for Error { + fn custom(_msg: T) -> Self { + unreachable!() + } +} + +/// This is a special serializer that doesn't store the serialized data, instead it counts the bytes +/// Yes, it's inaccurate, but we're looking for something that is relatively cheap to compute. +/// It doesn't take into account shared datastructures occurring multiple times and will give the +/// full estimated serialized cost. 
+#[derive(Default, Debug)] +struct CountingSerializer { + count: usize, +} + +impl ser::Serializer for CountingSerializer { + type Ok = Self; + type Error = Error; + type SerializeSeq = Self; + type SerializeTuple = Self; + type SerializeTupleStruct = Self; + type SerializeTupleVariant = Self; + type SerializeMap = Self; + type SerializeStruct = Self; + type SerializeStructVariant = Self; + + fn serialize_bool(mut self, _v: bool) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_i8(mut self, _v: i8) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_i16(mut self, _v: i16) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_i32(mut self, _v: i32) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_i64(mut self, _v: i64) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_u8(mut self, _v: u8) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_u16(mut self, _v: u16) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_u32(mut self, _v: u32) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_u64(mut self, _v: u64) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_f32(mut self, _v: f32) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_f64(mut self, _v: f64) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_char(mut self, _v: char) -> Result { + self.count += std::mem::size_of::(); + Ok(self) + } + + fn serialize_str(mut self, v: &str) -> Result { + //ptr + 8 bytes length + 8 bytes capacity + self.count += 24 + v.len(); + Ok(self) + } + + fn serialize_bytes(mut self, v: &[u8]) -> Result { + self.count += v.len(); + Ok(self) + } + + fn serialize_none(self) -> Result { + Ok(self) + } + + fn serialize_some(self, value: &T) -> 
Result + where + T: ?Sized + Serialize, + { + Ok(value.serialize(self).expect("failed to serialize")) + } + + fn serialize_unit(self) -> Result { + Ok(self) + } + + fn serialize_unit_struct(self, _name: &'static str) -> Result { + Ok(self) + } + + fn serialize_unit_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + ) -> Result { + Ok(self) + } + + fn serialize_newtype_struct( + self, + _name: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + Ok(value.serialize(self).expect("failed to serialize")) + } + + fn serialize_newtype_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + Ok(value.serialize(self).expect("failed to serialize")) + } + + fn serialize_seq(self, _len: Option) -> Result { + Ok(self) + } + + fn serialize_tuple(self, _len: usize) -> Result { + Ok(self) + } + + fn serialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Ok(self) + } + + fn serialize_tuple_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + Ok(self) + } + + fn serialize_map(self, _len: Option) -> Result { + Ok(self) + } + + fn serialize_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Ok(self) + } + + fn serialize_struct_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + Ok(self) + } +} +impl SerializeStructVariant for CountingSerializer { + type Ok = Self; + type Error = Error; + + fn serialize_field(&mut self, _key: &'static str, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let ser = value + .serialize(CountingSerializer::default()) + .expect("must be able to serialize"); + self.count += ser.count; + Ok(()) + } + + fn end(self) -> Result { + Ok(self) + } +} +impl SerializeSeq for 
CountingSerializer { + type Ok = Self; + type Error = Error; + + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let ser = value + .serialize(CountingSerializer::default()) + .expect("must be able to serialize"); + self.count += ser.count; + Ok(()) + } + + fn end(self) -> Result { + Ok(self) + } +} +impl SerializeTuple for CountingSerializer { + type Ok = Self; + type Error = Error; + + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let ser = value + .serialize(CountingSerializer::default()) + .expect("must be able to serialize"); + self.count += ser.count; + Ok(()) + } + + fn end(self) -> Result { + Ok(self) + } +} + +impl SerializeStruct for CountingSerializer { + type Ok = Self; + type Error = Error; + + fn serialize_field(&mut self, _key: &'static str, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let ser = value + .serialize(CountingSerializer::default()) + .expect("must be able to serialize"); + self.count += ser.count; + Ok(()) + } + + fn end(self) -> Result { + Ok(self) + } +} + +impl SerializeMap for CountingSerializer { + type Ok = Self; + type Error = Error; + + fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let ser = key + .serialize(CountingSerializer::default()) + .expect("must be able to serialize"); + self.count += ser.count; + Ok(()) + } + + fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let ser = value + .serialize(CountingSerializer::default()) + .expect("must be able to serialize"); + self.count += ser.count; + Ok(()) + } + + fn end(self) -> Result { + Ok(self) + } +} + +impl SerializeTupleVariant for CountingSerializer { + type Ok = Self; + type Error = Error; + + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let ser = 
value + .serialize(CountingSerializer::default()) + .expect("must be able to serialize"); + self.count += ser.count; + Ok(()) + } + + fn end(self) -> Result { + Ok(self) + } +} + +impl SerializeTupleStruct for CountingSerializer { + type Ok = Self; + type Error = Error; + + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let ser = value + .serialize(CountingSerializer::default()) + .expect("must be able to serialize"); + self.count += ser.count; + Ok(()) + } + + fn end(self) -> Result { + Ok(self) + } +} + +#[cfg(test)] +mod test { + use serde::Serialize; + + use crate::cache::estimate_size; + + #[test] + fn test_estimate_size() { + #[derive(Serialize)] + struct Test { + string: String, + u8: u8, + embedded: TestEmbedded, + } + + #[derive(Serialize)] + struct TestEmbedded { + string: String, + u8: u8, + } + + // Baseline + let s = estimate_size(&Test { + string: "".to_string(), + u8: 0, + embedded: TestEmbedded { + string: "".to_string(), + u8: 0, + }, + }); + assert_eq!(s, 50); + + // Test modifying the root struct + let s = estimate_size(&Test { + string: "test".to_string(), + u8: 0, + embedded: TestEmbedded { + string: "".to_string(), + u8: 0, + }, + }); + assert_eq!(s, 54); + + // Test modifying the embedded struct + let s = estimate_size(&Test { + string: "".to_string(), + u8: 0, + embedded: TestEmbedded { + string: "test".to_string(), + u8: 0, + }, + }); + assert_eq!(s, 54); + } +} diff --git a/apollo-router/src/cache/storage.rs b/apollo-router/src/cache/storage.rs index b72ad9d378..7cfa37a0ad 100644 --- a/apollo-router/src/cache/storage.rs +++ b/apollo-router/src/cache/storage.rs @@ -2,9 +2,16 @@ use std::fmt::Display; use std::fmt::{self}; use std::hash::Hash; use std::num::NonZeroUsize; +use std::sync::atomic::AtomicI64; +use std::sync::atomic::Ordering; use std::sync::Arc; use lru::LruCache; +use opentelemetry::metrics::MeterProvider; +use opentelemetry_api::metrics::Meter; +use 
opentelemetry_api::metrics::ObservableGauge; +use opentelemetry_api::metrics::Unit; +use opentelemetry_api::KeyValue; use serde::de::DeserializeOwned; use serde::Serialize; use tokio::sync::Mutex; @@ -13,6 +20,8 @@ use tower::BoxError; use super::redis::*; use crate::configuration::RedisCache; +use crate::metrics; +use crate::plugins::telemetry::config_new::instruments::METER_NAME; pub(crate) trait KeyType: Clone + fmt::Debug + fmt::Display + Hash + Eq + Send + Sync @@ -21,6 +30,10 @@ pub(crate) trait KeyType: pub(crate) trait ValueType: Clone + fmt::Debug + Send + Sync + Serialize + DeserializeOwned { + /// Returns an estimated size of the cache entry in bytes. + fn estimated_size(&self) -> Option { + None + } } // Blanket implementation which satisfies the compiler @@ -32,15 +45,6 @@ where // It has the functions it needs already } -// Blanket implementation which satisfies the compiler -impl ValueType for V -where - V: Clone + fmt::Debug + Send + Sync + Serialize + DeserializeOwned, -{ - // Nothing to implement, since V already supports the other traits. - // It has the functions it needs already -} - pub(crate) type InMemoryCache = Arc>>; // placeholder storage module @@ -52,6 +56,10 @@ pub(crate) struct CacheStorage { caller: String, inner: Arc>>, redis: Option, + cache_size: Arc, + cache_estimated_storage: Arc, + _cache_size_gauge: ObservableGauge, + _cache_estimated_storage_gauge: ObservableGauge, } impl CacheStorage @@ -62,9 +70,19 @@ where pub(crate) async fn new( max_capacity: NonZeroUsize, config: Option, - caller: &str, + caller: &'static str, ) -> Result { + // Because calculating the cache size is expensive we do this as we go rather than iterating. 
This means storing the values for the gauges + let meter: opentelemetry::metrics::Meter = metrics::meter_provider().meter(METER_NAME); + let (cache_size, cache_size_gauge) = Self::create_cache_size_gauge(&meter, caller); + let (cache_estimated_storage, cache_estimated_storage_gauge) = + Self::create_cache_estimated_storage_size_gauge(&meter, caller); + Ok(Self { + _cache_size_gauge: cache_size_gauge, + _cache_estimated_storage_gauge: cache_estimated_storage_gauge, + cache_size, + cache_estimated_storage, caller: caller.to_string(), inner: Arc::new(Mutex::new(LruCache::new(max_capacity))), redis: if let Some(config) = config { @@ -89,6 +107,56 @@ where }) } + fn create_cache_size_gauge( + meter: &Meter, + caller: &'static str, + ) -> (Arc, ObservableGauge) { + let current_cache_size = Arc::new(AtomicI64::new(0)); + let current_cache_size_for_gauge = current_cache_size.clone(); + let cache_size_gauge = meter + // TODO move to dot naming convention + .i64_observable_gauge("apollo_router_cache_size") + .with_description("Cache size") + .with_callback(move |i| { + i.observe( + current_cache_size_for_gauge.load(Ordering::SeqCst), + &[ + KeyValue::new("kind", caller), + KeyValue::new("type", "memory"), + ], + ) + }) + .init(); + (current_cache_size, cache_size_gauge) + } + + fn create_cache_estimated_storage_size_gauge( + meter: &Meter, + caller: &'static str, + ) -> (Arc, ObservableGauge) { + let cache_estimated_storage = Arc::new(AtomicI64::new(0)); + let cache_estimated_storage_for_gauge = cache_estimated_storage.clone(); + let cache_estimated_storage_gauge = meter + .i64_observable_gauge("apollo.router.cache.storage.estimated_size") + .with_description("Estimated cache storage") + .with_unit(Unit::new("bytes")) + .with_callback(move |i| { + // If there's no storage then don't bother updating the gauge + let value = cache_estimated_storage_for_gauge.load(Ordering::SeqCst); + if value > 0 { + i.observe( + cache_estimated_storage_for_gauge.load(Ordering::SeqCst), + &[ + 
KeyValue::new("kind", caller), + KeyValue::new("type", "memory"), + ], + ) + } + }) + .init(); + (cache_estimated_storage, cache_estimated_storage_gauge) + } + /// `init_from_redis` is called with values newly deserialized from Redis cache /// if an error is returned, the value is ignored and considered a cache miss. pub(crate) async fn get( @@ -143,7 +211,7 @@ where }); match redis_value { Some(v) => { - self.inner.lock().await.put(key.clone(), v.0.clone()); + self.insert_in_memory(key.clone(), v.0.clone()).await; tracing::info!( monotonic_counter.apollo_router_cache_hit_count = 1u64, @@ -187,25 +255,33 @@ where .await; } - let mut in_memory = self.inner.lock().await; - in_memory.put(key, value); - let size = in_memory.len() as u64; - tracing::info!( - value.apollo_router_cache_size = size, - kind = %self.caller, - storage = &tracing::field::display(CacheStorageName::Memory), - ); + self.insert_in_memory(key, value).await; } - pub(crate) async fn insert_in_memory(&self, key: K, value: V) { - let mut in_memory = self.inner.lock().await; - in_memory.put(key, value); - let size = in_memory.len() as u64; - tracing::info!( - value.apollo_router_cache_size = size, - kind = %self.caller, - storage = &tracing::field::display(CacheStorageName::Memory), - ); + pub(crate) async fn insert_in_memory(&self, key: K, value: V) + where + V: ValueType, + { + // Update the cache size and estimated storage size + // This is cheaper than trying to estimate the cache storage size by iterating over the cache + let new_value_size = value.estimated_size().unwrap_or(0) as i64; + + let (old_value, length) = { + let mut in_memory = self.inner.lock().await; + (in_memory.push(key, value), in_memory.len()) + }; + + let size_delta = match old_value { + Some((_, old_value)) => { + let old_value_size = old_value.estimated_size().unwrap_or(0) as i64; + new_value_size - old_value_size + } + None => new_value_size, + }; + self.cache_estimated_storage + .fetch_add(size_delta, Ordering::SeqCst); + + 
self.cache_size.store(length as i64, Ordering::SeqCst); } pub(crate) fn in_memory_cache(&self) -> InMemoryCache { @@ -231,3 +307,184 @@ impl Display for CacheStorageName { } } } + +impl ValueType for String { + fn estimated_size(&self) -> Option { + Some(self.len()) + } +} + +impl ValueType for crate::graphql::Response { + fn estimated_size(&self) -> Option { + None + } +} + +impl ValueType for usize { + fn estimated_size(&self) -> Option { + Some(std::mem::size_of::()) + } +} + +#[cfg(test)] +mod test { + use std::num::NonZeroUsize; + + use crate::cache::estimate_size; + use crate::cache::storage::CacheStorage; + use crate::cache::storage::ValueType; + use crate::metrics::FutureMetricsExt; + + #[tokio::test] + async fn test_metrics() { + #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] + struct Stuff {} + impl ValueType for Stuff { + fn estimated_size(&self) -> Option { + Some(1) + } + } + + async { + let cache: CacheStorage = + CacheStorage::new(NonZeroUsize::new(10).unwrap(), None, "test") + .await + .unwrap(); + + cache.insert("test".to_string(), Stuff {}).await; + assert_gauge!( + "apollo.router.cache.storage.estimated_size", + 1, + "kind" = "test", + "type" = "memory" + ); + assert_gauge!( + "apollo_router_cache_size", + 1, + "kind" = "test", + "type" = "memory" + ); + } + .with_metrics() + .await; + } + + #[tokio::test] + #[should_panic] + async fn test_metrics_not_emitted_where_no_estimated_size() { + #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] + struct Stuff {} + impl ValueType for Stuff { + fn estimated_size(&self) -> Option { + None + } + } + + async { + let cache: CacheStorage = + CacheStorage::new(NonZeroUsize::new(10).unwrap(), None, "test") + .await + .unwrap(); + + cache.insert("test".to_string(), Stuff {}).await; + // This metric won't exist + assert_gauge!( + "apollo_router_cache_size", + 0, + "kind" = "test", + "type" = "memory" + ); + } + .with_metrics() + .await; + } + + #[tokio::test] + async fn 
test_metrics_eviction() { + #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] + struct Stuff { + test: String, + } + impl ValueType for Stuff { + fn estimated_size(&self) -> Option { + Some(estimate_size(self)) + } + } + + async { + // note that the cache size is 1 + // so the second insert will always evict + let cache: CacheStorage = + CacheStorage::new(NonZeroUsize::new(1).unwrap(), None, "test") + .await + .unwrap(); + + cache + .insert( + "test".to_string(), + Stuff { + test: "test".to_string(), + }, + ) + .await; + assert_gauge!( + "apollo.router.cache.storage.estimated_size", + 28, + "kind" = "test", + "type" = "memory" + ); + assert_gauge!( + "apollo_router_cache_size", + 1, + "kind" = "test", + "type" = "memory" + ); + + // Insert something slightly larger + cache + .insert( + "test".to_string(), + Stuff { + test: "test_extended".to_string(), + }, + ) + .await; + assert_gauge!( + "apollo.router.cache.storage.estimated_size", + 37, + "kind" = "test", + "type" = "memory" + ); + assert_gauge!( + "apollo_router_cache_size", + 1, + "kind" = "test", + "type" = "memory" + ); + + // Even though this is a new cache entry, we should get back to where we initially were + cache + .insert( + "test2".to_string(), + Stuff { + test: "test".to_string(), + }, + ) + .await; + assert_gauge!( + "apollo.router.cache.storage.estimated_size", + 28, + "kind" = "test", + "type" = "memory" + ); + assert_gauge!( + "apollo_router_cache_size", + 1, + "kind" = "test", + "type" = "memory" + ); + } + .with_metrics() + .await; + } +} diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index 443a60d9d8..deacb1db7f 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -36,6 +36,7 @@ use crate::batching::BatchQuery; use crate::cache::redis::RedisCacheStorage; use crate::cache::redis::RedisKey; use crate::cache::redis::RedisValue; +use crate::cache::storage::ValueType; use 
crate::configuration::subgraph::SubgraphConfiguration; use crate::configuration::RedisCache; use crate::error::FetchError; @@ -886,6 +887,12 @@ struct CacheEntry { data: Value, } +impl ValueType for CacheEntry { + fn estimated_size(&self) -> Option { + None + } +} + async fn cache_store_root_from_response( cache: RedisCacheStorage, subgraph_ttl: Option, diff --git a/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs b/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs index c7bfdc1ec4..22bcf3fdb6 100644 --- a/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs +++ b/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs @@ -45,6 +45,7 @@ pub(super) fn rearrange_query_plan( formatted_query_plan: query_plan.formatted_query_plan.clone(), query: query_plan.query.clone(), query_metrics: query_plan.query_metrics, + estimated_size: Default::default(), }) } diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index d4fcb1331c..f5772b4cbf 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -624,6 +624,7 @@ impl BridgeQueryPlanner { formatted_query_plan, query: Arc::new(selections), query_metrics, + estimated_size: Default::default(), }), }) } diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index 9d063e1652..f6718143ec 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -24,7 +24,9 @@ use tower_service::Service; use tracing::Instrument; use super::fetch::QueryHash; +use crate::cache::estimate_size; use crate::cache::storage::InMemoryCache; +use crate::cache::storage::ValueType; use crate::cache::DeduplicatingCache; use crate::error::CacheResolverError; use crate::error::QueryPlannerError; @@ -687,6 +689,17 @@ 
pub(crate) struct WarmUpCachingQueryKey { pub(crate) introspection: bool, } +impl ValueType for Result> { + fn estimated_size(&self) -> Option { + match self { + Ok(QueryPlannerContent::Plan { plan }) => Some(plan.estimated_size()), + Ok(QueryPlannerContent::Response { response }) => Some(estimate_size(response)), + Ok(QueryPlannerContent::IntrospectionDisabled) => None, + Err(e) => Some(estimate_size(e)), + } + } +} + #[cfg(test)] mod tests { use mockall::mock; @@ -838,6 +851,7 @@ mod tests { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let qp_content = QueryPlannerContent::Plan { plan: Arc::new(query_plan), diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index bf4471e23b..447adb7ba7 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -1,3 +1,5 @@ +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; use std::sync::Arc; use apollo_compiler::validation::Valid; @@ -9,6 +11,7 @@ use serde::Serialize; pub(crate) use self::fetch::OperationKind; use super::fetch; use super::subscription::SubscriptionNode; +use crate::cache::estimate_size; use crate::configuration::Batching; use crate::error::CacheResolverError; use crate::error::ValidationErrors; @@ -42,6 +45,10 @@ pub struct QueryPlan { pub(crate) formatted_query_plan: Option>, pub(crate) query: Arc, pub(crate) query_metrics: OperationLimits, + + /// The estimated size in bytes of the query plan + #[serde(default)] + pub(crate) estimated_size: Arc, } /// This default impl is useful for test users @@ -64,6 +71,7 @@ impl QueryPlan { formatted_query_plan: Default::default(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), } } } @@ -89,6 +97,14 @@ impl QueryPlan { self.root .query_hashes(batching_config, operation, variables, &self.query) } + + pub(crate) fn estimated_size(&self) -> 
usize { + if self.estimated_size.load(Ordering::SeqCst) == 0 { + self.estimated_size + .store(estimate_size(self), Ordering::SeqCst); + } + self.estimated_size.load(Ordering::SeqCst) + } } /// Query plans are composed of a set of nodes. @@ -607,3 +623,17 @@ pub(crate) struct DeferredNode { pub(crate) struct Depends { pub(crate) id: String, } + +#[cfg(test)] +mod test { + use crate::query_planner::QueryPlan; + + #[test] + fn test_estimated_size() { + let query_plan = QueryPlan::fake_builder().build(); + let size1 = query_plan.estimated_size(); + let size2 = query_plan.estimated_size(); + assert!(size1 > 0); + assert_eq!(size1, size2); + } +} diff --git a/apollo-router/src/query_planner/tests.rs b/apollo-router/src/query_planner/tests.rs index fd7fb6d8b6..cfd44d6d08 100644 --- a/apollo-router/src/query_planner/tests.rs +++ b/apollo-router/src/query_planner/tests.rs @@ -87,6 +87,7 @@ async fn mock_subgraph_service_withf_panics_should_be_reported_as_service_closed referenced_fields_by_type: Default::default(), } .into(), + estimated_size: Default::default(), }; let mut mock_products_service = plugin::test::MockSubgraphService::new(); @@ -142,6 +143,7 @@ async fn fetch_includes_operation_name() { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let succeeded: Arc = Default::default(); @@ -202,6 +204,7 @@ async fn fetch_makes_post_requests() { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let succeeded: Arc = Default::default(); @@ -329,7 +332,8 @@ async fn defer() { referenced_fields_by_type: Default::default(), }.into(), query: Arc::new(Query::empty()), - query_metrics: Default::default() + query_metrics: Default::default(), + estimated_size: Default::default(), }; let mut mock_x_service = plugin::test::MockSubgraphService::new(); @@ -460,6 +464,7 @@ async fn defer_if_condition() { ), formatted_query_plan: None, query_metrics: 
Default::default(), + estimated_size: Default::default(), }; let mocked_accounts = MockSubgraph::builder() @@ -642,6 +647,7 @@ async fn dependent_mutations() { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let mut mock_a_service = plugin::test::MockSubgraphService::new(); @@ -1826,6 +1832,7 @@ fn broken_plan_does_not_panic() { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let subgraph_schema = apollo_compiler::Schema::parse_and_validate(subgraph_schema, "").unwrap(); let mut subgraph_schemas = HashMap::new(); diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index a5ea8403d5..dec84074f8 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -454,6 +454,7 @@ async fn subscription_task( formatted_query_plan: query_plan.formatted_query_plan.clone(), query: query_plan.query.clone(), query_metrics: query_plan.query_metrics, + estimated_size: Default::default(), }) }), _ => { diff --git a/docs/source/configuration/in-memory-caching.mdx b/docs/source/configuration/in-memory-caching.mdx index 2acf126c59..de15cbaf6c 100644 --- a/docs/source/configuration/in-memory-caching.mdx +++ b/docs/source/configuration/in-memory-caching.mdx @@ -72,15 +72,18 @@ supergraph: To get more information on the planning and warm-up process use the following metrics (where `` can be `redis` for distributed cache or `memory`): * counters: - * `apollo_router_cache_size{kind="query planner", storage="}`: current size of the cache (only for in-memory cache) - * `apollo_router_cache_hit_count{kind="query planner", storage="}` - * `apollo_router_cache_miss_count{kind="query planner", storage="}` + * `apollo_router_cache_hit_count{kind="query planner", storage=""}` + * `apollo_router_cache_miss_count{kind="query planner", storage=""}` 
* histograms: * `apollo.router.query_planning.plan.duration`: time spent planning queries * `apollo_router_schema_loading_time`: time spent loading a schema - * `apollo_router_cache_hit_time{kind="query planner", storage="}`: time to get a value from the cache - * `apollo_router_cache_miss_time{kind="query planner", storage="}` + * `apollo_router_cache_hit_time{kind="query planner", storage=""}`: time to get a value from the cache + * `apollo_router_cache_miss_time{kind="query planner", storage=""}` + +* gauges + * `apollo_router_cache_size{kind="query planner", storage="memory"}`: current size of the cache (only for in-memory cache) + * `apollo.router.cache.storage.estimated_size{kind="query planner", storage="memory"}`: estimated storage size of the cache (only for in-memory query planner cache) Typically, we would look at `apollo_router_cache_size` and the cache hit rate to define the right size of the in memory cache, then look at `apollo_router_schema_loading_time` and `apollo.router.query_planning.plan.duration` to decide how much time we want to spend warming up queries. diff --git a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx index 1a3358a38e..37c63e8b57 100644 --- a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx +++ b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx @@ -38,6 +38,7 @@ These instruments can be consumed by configuring a [metrics exporter](../exporte - `apollo_router_cache_miss_count` - Number of cache misses - `apollo_router_cache_hit_time` - Time to hit the cache in seconds - `apollo_router_cache_miss_time` - Time to miss the cache in seconds +- `apollo.router.cache.storage.estimated_size` - The estimated storage size of the cache in bytes (query planner in memory only). 
All cache metrics listed above have the following attributes: diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx index a2393c5225..591e30db0d 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -285,3 +285,26 @@ The gateway propagates subgraph errors to clients, but the router doesn't by def include_subgraph_errors: all: true ``` + +## Troubleshooting + +### Pods terminating due to memory pressure + +If your deployment of routers is terminating due to memory pressure, you can add router cache metrics to monitor and remediate your system: + +1. Add and track the following metrics to your monitoring system: + + * `apollo.router.cache.storage.estimated_size` + * `apollo_router_cache_size` + * ratio of `apollo_router_cache_hit_count` to `apollo_router_cache_miss_count` + +2. Observe and monitor the metrics: + + * Observe the `apollo.router.cache.storage.estimated_size` to see if it grows over time and correlates with pod memory usage. + * Observe the ratio of cache hits to misses to determine if the cache is being effective. + +3. Based on your observations, try some remediating adjustments: + + * Lower the cache size if the cache reaches near 100% hit-rate but the cache size is still growing. + * Increase the pod memory if the cache hit rate is low and the cache size is still growing. + * Lower the cache size if the latency of query planning cache misses is acceptable and memory availability is limited. 
From d2a4b1cbd2756a021ac6f8d94f6cb5ee960e820f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Wed, 7 Aug 2024 15:51:39 +0200 Subject: [PATCH 038/108] feat(federation): add query planner config flags to the CLI (#5769) --- apollo-federation/cli/src/bench.rs | 12 +-- apollo-federation/cli/src/main.rs | 113 +++++++++++++++++++++++++---- 2 files changed, 99 insertions(+), 26 deletions(-) diff --git a/apollo-federation/cli/src/bench.rs b/apollo-federation/cli/src/bench.rs index cd098a1989..c672137982 100644 --- a/apollo-federation/cli/src/bench.rs +++ b/apollo-federation/cli/src/bench.rs @@ -11,17 +11,9 @@ use apollo_federation::Supergraph; pub(crate) fn run_bench( supergraph: Supergraph, queries_dir: &PathBuf, + config: QueryPlannerConfig, ) -> Result, FederationError> { - let planner = QueryPlanner::new( - &supergraph, - QueryPlannerConfig { - reuse_query_fragments: false, - subgraph_graphql_validation: false, - generate_query_fragments: true, - ..Default::default() - }, - ) - .expect("Invalid planner"); + let planner = QueryPlanner::new(&supergraph, config.clone()).expect("Invalid planner"); let mut entries = std::fs::read_dir(queries_dir) .unwrap() diff --git a/apollo-federation/cli/src/main.rs b/apollo-federation/cli/src/main.rs index 41ba7e5f08..7e707434e2 100644 --- a/apollo-federation/cli/src/main.rs +++ b/apollo-federation/cli/src/main.rs @@ -1,5 +1,6 @@ use std::fs; use std::io; +use std::num::NonZeroU32; use std::path::Path; use std::path::PathBuf; use std::process::ExitCode; @@ -17,6 +18,32 @@ use clap::Parser; mod bench; use bench::run_bench; +#[derive(Parser)] +struct QueryPlannerArgs { + /// Enable @defer support. + #[arg(long, default_value_t = false)] + enable_defer: bool, + /// Reuse fragments to compress subgraph queries. + #[arg(long, default_value_t = false)] + reuse_fragments: bool, + /// Generate fragments to compress subgraph queries. 
+ #[arg(long, default_value_t = false)] + generate_fragments: bool, + /// Run GraphQL validation check on generated subgraph queries. (default: true) + #[arg(long, default_missing_value = "true", require_equals = true, num_args = 0..=1)] + subgraph_validation: Option, + /// Set the `debug.max_evaluated_plans` option. + #[arg(long)] + max_evaluated_plans: Option, + /// Set the `debug.paths_limit` option. + #[arg(long)] + paths_limit: Option, + /// If the supergraph only represents a single subgraph, pass through queries directly without + /// planning. + #[arg(long, default_value_t = false)] + single_subgraph_passthrough: bool, +} + /// CLI arguments. See #[derive(Parser)] struct Args { @@ -30,6 +57,9 @@ enum Command { Api { /// Path(s) to one supergraph schema file, `-` for stdin or multiple subgraph schemas. schemas: Vec, + /// Enable @defer support. + #[arg(long, default_value_t = false)] + enable_defer: bool, }, /// Outputs the query graph from a supergraph schema or subgraph schemas QueryGraph { @@ -46,6 +76,8 @@ enum Command { query: PathBuf, /// Path(s) to one supergraph schema file, `-` for stdin or multiple subgraph schemas. 
schemas: Vec, + #[command(flatten)] + planner: QueryPlannerArgs, }, /// Validate one supergraph schema file or multiple subgraph schemas Validate { @@ -69,16 +101,48 @@ enum Command { supergraph_schema: PathBuf, /// The path to the directory that contains all operations to run against operations_dir: PathBuf, + #[command(flatten)] + planner: QueryPlannerArgs, }, } +impl QueryPlannerArgs { + fn apply(&self, config: &mut QueryPlannerConfig) { + config.incremental_delivery.enable_defer = self.enable_defer; + // --generate-fragments trumps --reuse-fragments + config.reuse_query_fragments = self.reuse_fragments && !self.generate_fragments; + config.generate_query_fragments = self.generate_fragments; + config.subgraph_graphql_validation = self.subgraph_validation.unwrap_or(true); + if let Some(max_evaluated_plans) = self.max_evaluated_plans { + config.debug.max_evaluated_plans = max_evaluated_plans; + } + config.debug.paths_limit = self.paths_limit; + config.debug.bypass_planner_for_single_subgraph = self.single_subgraph_passthrough; + } +} + +impl From for QueryPlannerConfig { + fn from(value: QueryPlannerArgs) -> Self { + let mut config = QueryPlannerConfig::default(); + value.apply(&mut config); + config + } +} + fn main() -> ExitCode { let args = Args::parse(); let result = match args.command { - Command::Api { schemas } => to_api_schema(&schemas), - Command::QueryGraph { schemas } => dot_query_graph(&schemas), - Command::FederatedGraph { schemas } => dot_federated_graph(&schemas), - Command::Plan { query, schemas } => plan(&query, &schemas), + Command::Api { + schemas, + enable_defer, + } => cmd_api_schema(&schemas, enable_defer), + Command::QueryGraph { schemas } => cmd_query_graph(&schemas), + Command::FederatedGraph { schemas } => cmd_federated_graph(&schemas), + Command::Plan { + query, + schemas, + planner, + } => cmd_plan(&query, &schemas, planner), Command::Validate { schemas } => cmd_validate(&schemas), Command::Compose { schemas } => cmd_compose(&schemas), 
Command::Extract { @@ -88,7 +152,8 @@ fn main() -> ExitCode { Command::Bench { supergraph_schema, operations_dir, - } => cmd_bench(&supergraph_schema, &operations_dir), + planner, + } => cmd_bench(&supergraph_schema, &operations_dir, planner), }; match result { Err(error) => { @@ -108,10 +173,10 @@ fn read_input(input_path: &Path) -> String { } } -fn to_api_schema(file_paths: &[PathBuf]) -> Result<(), FederationError> { +fn cmd_api_schema(file_paths: &[PathBuf], enable_defer: bool) -> Result<(), FederationError> { let supergraph = load_supergraph(file_paths)?; let api_schema = supergraph.to_api_schema(apollo_federation::ApiSchemaOptions { - include_defer: true, + include_defer: enable_defer, include_stream: false, })?; println!("{}", api_schema.schema()); @@ -154,7 +219,7 @@ fn load_supergraph( } } -fn dot_query_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { +fn cmd_query_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { let supergraph = load_supergraph(file_paths)?; let name: &str = if file_paths.len() == 1 { file_paths[0].file_stem().unwrap().to_str().unwrap() @@ -167,7 +232,7 @@ fn dot_query_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { Ok(()) } -fn dot_federated_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { +fn cmd_federated_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { let supergraph = load_supergraph(file_paths)?; let api_schema = supergraph.to_api_schema(Default::default())?; let query_graph = @@ -176,13 +241,17 @@ fn dot_federated_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { Ok(()) } -fn plan(query_path: &Path, schema_paths: &[PathBuf]) -> Result<(), FederationError> { +fn cmd_plan( + query_path: &Path, + schema_paths: &[PathBuf], + planner: QueryPlannerArgs, +) -> Result<(), FederationError> { let query = read_input(query_path); let supergraph = load_supergraph(schema_paths)?; let query_doc = ExecutableDocument::parse_and_validate(supergraph.schema.schema(), 
query, query_path)?; - // TODO: add CLI parameters for config as needed - let config = QueryPlannerConfig::default(); + let config = QueryPlannerConfig::from(planner); + let planner = QueryPlanner::new(&supergraph, config)?; print!("{}", planner.build_query_plan(&query_doc, None)?); Ok(()) @@ -228,13 +297,18 @@ fn cmd_extract(file_path: &Path, dest: Option<&PathBuf>) -> Result<(), Federatio fn _cmd_bench( file_path: &Path, operations_dir: &PathBuf, + config: QueryPlannerConfig, ) -> Result, FederationError> { let supergraph = load_supergraph_file(file_path)?; - run_bench(supergraph, operations_dir) + run_bench(supergraph, operations_dir, config) } -fn cmd_bench(file_path: &Path, operations_dir: &PathBuf) -> Result<(), FederationError> { - let results = _cmd_bench(file_path, operations_dir)?; +fn cmd_bench( + file_path: &Path, + operations_dir: &PathBuf, + planner: QueryPlannerArgs, +) -> Result<(), FederationError> { + let results = _cmd_bench(file_path, operations_dir, planner.into())?; println!("| operation_name | time (ms) | evaluated_plans (max 10000) | error |"); println!("|----------------|----------------|-----------|-----------------------------|"); for r in results { @@ -245,5 +319,12 @@ fn cmd_bench(file_path: &Path, operations_dir: &PathBuf) -> Result<(), Federatio #[test] fn test_bench() { - insta::assert_json_snapshot!(_cmd_bench(Path::new("./fixtures/starstuff.graphql"), &PathBuf::from("./fixtures/queries")).unwrap(), { "[].timing" => 1.234 }); + insta::assert_json_snapshot!( + _cmd_bench( + Path::new("./fixtures/starstuff.graphql"), + &PathBuf::from("./fixtures/queries"), + Default::default(), + ).unwrap(), + { "[].timing" => 1.234 }, + ); } From ff6a3ae759f23002465f34013366458ff2237717 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Wed, 7 Aug 2024 19:40:38 +0200 Subject: [PATCH 039/108] chore(federation): refactor fragment usage counting (#5784) --- apollo-federation/src/operation/mod.rs | 108 +++++++++++--------- 
apollo-federation/src/operation/optimize.rs | 14 +-- 2 files changed, 61 insertions(+), 61 deletions(-) diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index a585cbd7a2..9e18e77e7d 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -841,25 +841,6 @@ impl Selection { } } - fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { - match self { - Selection::Field(field_selection) => { - if let Some(s) = field_selection.selection_set.clone() { - s.collect_used_fragment_names(aggregator) - } - } - Selection::InlineFragment(inline) => { - inline.selection_set.collect_used_fragment_names(aggregator); - } - Selection::FragmentSpread(fragment) => { - let current_count = aggregator - .entry(fragment.spread.fragment_name.clone()) - .or_default(); - *current_count += 1; - } - } - } - pub(crate) fn with_updated_selection_set( &self, selection_set: Option, @@ -1012,18 +993,6 @@ impl Fragment { }) } - // PORT NOTE: in JS code this is stored on the fragment - pub(crate) fn fragment_usages(&self) -> HashMap { - let mut usages = HashMap::new(); - self.selection_set.collect_used_fragment_names(&mut usages); - usages - } - - // PORT NOTE: in JS code this is stored on the fragment - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { - self.selection_set.collect_used_fragment_names(aggregator) - } - fn has_defer(&self) -> bool { self.selection_set.has_defer() } @@ -2624,12 +2593,6 @@ impl SelectionSet { Ok(()) } - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { - self.selections - .iter() - .for_each(|(_, s)| s.collect_used_fragment_names(aggregator)); - } - /// Removes the @defer directive from all selections without removing that selection. 
fn without_defer(&mut self) { for (_key, mut selection) in Arc::make_mut(&mut self.selections).iter_mut() { @@ -3435,17 +3398,6 @@ impl NamedFragments { self.fragments.contains_key(name) } - /** - * Collect the usages of fragments that are used within the selection of other fragments. - */ - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { - for fragment in self.fragments.values() { - fragment - .selection_set - .collect_used_fragment_names(aggregator); - } - } - /// JS PORT NOTE: In JS implementation this method was named mapInDependencyOrder and accepted a lambda to /// apply transformation on the fragments. It was called when rebasing/filtering/expanding selection sets. /// JS PORT NOTE: In JS implementation this method was potentially returning `undefined`. In order to simplify the code @@ -3466,7 +3418,7 @@ impl NamedFragments { // the outcome of `map_to_expanded_selection_sets`. let mut fragments_map: IndexMap = IndexMap::default(); for fragment in fragments.values() { - let mut fragment_usages: HashMap = HashMap::new(); + let mut fragment_usages = HashMap::new(); NamedFragments::collect_fragment_usages(&fragment.selection_set, &mut fragment_usages); let usages: Vec = fragment_usages.keys().cloned().collect::>(); fragments_map.insert( @@ -3509,10 +3461,10 @@ impl NamedFragments { mapped_fragments } - // JS PORT - we need to calculate those for both executable::SelectionSet and SelectionSet + /// Just like our `SelectionSet::used_fragments`, but with apollo-compiler types fn collect_fragment_usages( selection_set: &executable::SelectionSet, - aggregator: &mut HashMap, + aggregator: &mut HashMap, ) { selection_set.selections.iter().for_each(|s| match s { executable::Selection::Field(f) => { @@ -3605,6 +3557,60 @@ impl RebasedFragments { } } +// Collect fragment usages from operation types. 
+ +impl Selection { + fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { + match self { + Selection::Field(field_selection) => { + if let Some(s) = &field_selection.selection_set { + s.collect_used_fragment_names(aggregator) + } + } + Selection::InlineFragment(inline) => { + inline.selection_set.collect_used_fragment_names(aggregator); + } + Selection::FragmentSpread(fragment) => { + let current_count = aggregator + .entry(fragment.spread.fragment_name.clone()) + .or_default(); + *current_count += 1; + } + } + } +} + +impl SelectionSet { + pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { + for s in self.selections.values() { + s.collect_used_fragment_names(aggregator); + } + } + + pub(crate) fn used_fragments(&self) -> HashMap { + let mut usages = HashMap::new(); + self.collect_used_fragment_names(&mut usages); + usages + } +} + +impl Fragment { + pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { + self.selection_set.collect_used_fragment_names(aggregator) + } +} + +impl NamedFragments { + /// Collect the usages of fragments that are used within the selection of other fragments. + pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { + for fragment in self.fragments.values() { + fragment + .selection_set + .collect_used_fragment_names(aggregator); + } + } +} + // Collect used variables from operation types. 
pub(crate) struct VariableCollector<'s> { diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index a4f95f9bb9..13e56f748f 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -1128,8 +1128,6 @@ impl NamedFragments { selection_set: &SelectionSet, min_usage_to_optimize: u32, ) -> Result { - let min_usage_to_optimize: i32 = min_usage_to_optimize.try_into().unwrap_or(i32::MAX); - // Call `reduce_inner` repeatedly until we reach a fix-point, since newly computed // selection set may drop some fragment references due to normalization, which could lead // to further reduction. @@ -1168,20 +1166,16 @@ impl NamedFragments { } /// The inner loop body of `reduce` method. - /// - Takes i32 `min_usage_to_optimize` since `collect_used_fragment_names` counts usages in - /// i32. fn reduce_inner( &mut self, selection_set: &SelectionSet, - min_usage_to_optimize: i32, + min_usage_to_optimize: u32, ) -> Result { - // Initial computation of fragment usages in `selection_set`. - let mut usages = HashMap::new(); - selection_set.collect_used_fragment_names(&mut usages); + let mut usages = selection_set.used_fragments(); // Short-circuiting: Nothing was used => Drop everything (selection_set is unchanged). 
if usages.is_empty() { - self.retain(|_, _| false); + *self = Default::default(); return Ok(selection_set.clone()); } @@ -1252,7 +1246,7 @@ impl NamedFragments { ) } - fn update_usages(usages: &mut HashMap, fragment: &Node, usage_count: i32) { + fn update_usages(usages: &mut HashMap, fragment: &Node, usage_count: u32) { let mut inner_usages = HashMap::new(); fragment.collect_used_fragment_names(&mut inner_usages); From 2153fba106039ed22cd9a4463e63593e6459c666 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Wed, 7 Aug 2024 19:15:10 +0100 Subject: [PATCH 040/108] Revert "Datadog sampling priority not set (#5703)" (#5780) Co-authored-by: bryn --- .changesets/fix_bryn_revert_5703.md | 5 +++ .../datadog_exporter/exporter/model/v05.rs | 8 +++- .../telemetry/tracing/datadog_exporter/mod.rs | 41 +++++++++++++++---- 3 files changed, 45 insertions(+), 9 deletions(-) create mode 100644 .changesets/fix_bryn_revert_5703.md diff --git a/.changesets/fix_bryn_revert_5703.md b/.changesets/fix_bryn_revert_5703.md new file mode 100644 index 0000000000..56145fc3cd --- /dev/null +++ b/.changesets/fix_bryn_revert_5703.md @@ -0,0 +1,5 @@ +### Datadog underreported APM metrics ([PR #5780](https://github.com/apollographql/router/pull/5780)) + +This reverts [PR #5703](https://github.com/apollographql/router/pull/5703) which causes Datadog APM span metrics to be under-reported. 
+ +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5780 diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs index 8cd3f8e66f..fd1590966e 100644 --- a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs +++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs @@ -128,8 +128,12 @@ fn write_unified_tag<'a>( Ok(()) } -fn get_sampling_priority(_span: &SpanData) -> f64 { - 1.0 +fn get_sampling_priority(span: &SpanData) -> f64 { + if span.span_context.trace_state().priority_sampling_enabled() { + 1.0 + } else { + 0.0 + } } fn get_measuring(span: &SpanData) -> f64 { diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs index d632eb5872..1c586d48c8 100644 --- a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs +++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs @@ -176,7 +176,7 @@ pub(crate) mod propagator { const DATADOG_SAMPLING_PRIORITY_HEADER: &str = "x-datadog-sampling-priority"; const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02); - pub(crate) const TRACE_STATE_PRIORITY_SAMPLING: &str = "psr"; + const TRACE_STATE_PRIORITY_SAMPLING: &str = "psr"; pub(crate) const TRACE_STATE_MEASURE: &str = "m"; pub(crate) const TRACE_STATE_TRUE_VALUE: &str = "1"; pub(crate) const TRACE_STATE_FALSE_VALUE: &str = "0"; @@ -243,6 +243,10 @@ pub(crate) mod propagator { fn with_measuring(&self, enabled: bool) -> TraceState; fn measuring_enabled(&self) -> bool; + + fn with_priority_sampling(&self, enabled: bool) -> TraceState; + + fn priority_sampling_enabled(&self) -> bool; } impl DatadogTraceState for TraceState { @@ -256,6 +260,20 @@ pub(crate) mod propagator { .map(trace_flag_to_boolean) .unwrap_or_default() } 
+ + fn with_priority_sampling(&self, enabled: bool) -> TraceState { + self.insert( + TRACE_STATE_PRIORITY_SAMPLING, + boolean_to_trace_state_flag(enabled), + ) + .unwrap_or_else(|_err| self.clone()) + } + + fn priority_sampling_enabled(&self) -> bool { + self.get(TRACE_STATE_PRIORITY_SAMPLING) + .map(trace_flag_to_boolean) + .unwrap_or_default() + } } enum SamplingPriority { @@ -293,7 +311,16 @@ pub(crate) mod propagator { } fn create_trace_state_and_flags(trace_flags: TraceFlags) -> (TraceState, TraceFlags) { - (TraceState::default(), trace_flags) + if trace_flags & TRACE_FLAG_DEFERRED == TRACE_FLAG_DEFERRED { + (TraceState::default(), trace_flags) + } else { + ( + DatadogTraceStateBuilder::default() + .with_priority_sampling(trace_flags.is_sampled()) + .build(), + TraceFlags::SAMPLED, + ) + } } impl DatadogPropagator { @@ -373,7 +400,7 @@ pub(crate) mod propagator { } fn get_sampling_priority(span_context: &SpanContext) -> SamplingPriority { - if span_context.is_sampled() { + if span_context.trace_state().priority_sampling_enabled() { SamplingPriority::AutoKeep } else { SamplingPriority::AutoReject @@ -433,8 +460,8 @@ pub(crate) mod propagator { (vec![(DATADOG_TRACE_ID_HEADER, "garbage")], SpanContext::empty_context()), (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "garbage")], SpanContext::new(TraceId::from_u128(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], 
SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(false).build())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(true).build())), ] } @@ -446,8 +473,8 @@ pub(crate) mod propagator { (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(false).build())), + (vec![(DATADOG_TRACE_ID_HEADER, 
"1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(true).build())), ] } From 80fd0e48ae778749ddbb90f20b4c3c82b29e5154 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Thu, 8 Aug 2024 12:40:58 +0200 Subject: [PATCH 041/108] feat(federation): minimal fragment generation (#5772) --- apollo-federation/src/operation/mod.rs | 59 +--- apollo-federation/src/operation/optimize.rs | 190 +++++++++++- apollo-federation/src/operation/rebase.rs | 10 +- .../src/query_plan/fetch_dependency_graph.rs | 14 +- .../fetch_dependency_graph_processor.rs | 10 +- .../src/query_plan/query_planner.rs | 75 ++++- .../fragment_autogeneration.rs | 275 +++++++++++------- ...ate_their_own_fragment_definitions.graphql | 4 +- ..._fragments_with_one_non_leaf_field.graphql | 4 +- ...handles_nested_fragment_generation.graphql | 4 +- ...ent_fragments_that_arent_identical.graphql | 4 +- .../it_migrates_skip_include.graphql | 71 +++++ ...ts_generate_query_fragments_option.graphql | 4 +- 13 files changed, 527 insertions(+), 197 deletions(-) create mode 100644 apollo-federation/tests/query_plan/supergraphs/it_migrates_skip_include.graphql diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index 9e18e77e7d..66c4bc4654 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -1996,7 +1996,7 @@ impl SelectionSet { // if we don't expand fragments, we need to normalize it let normalized_fragment_spread = FragmentSpreadSelection::from_fragment_spread( fragment_spread_selection, - &fragment, + fragment, )?; destination.push(Selection::FragmentSpread(Arc::new( normalized_fragment_spread, @@ -3353,7 +3353,7 @@ impl NamedFragments { self.fragments.len() == 0 } - pub(crate) fn size(&self) -> usize { + pub(crate) fn len(&self) -> usize { 
self.fragments.len() } @@ -3390,11 +3390,11 @@ impl NamedFragments { } } - pub(crate) fn get(&self, name: &Name) -> Option> { - self.fragments.get(name).cloned() + pub(crate) fn get(&self, name: &str) -> Option<&Node> { + self.fragments.get(name) } - pub(crate) fn contains(&self, name: &Name) -> bool { + pub(crate) fn contains(&self, name: &str) -> bool { self.fragments.contains_key(name) } @@ -3508,55 +3508,6 @@ impl NamedFragments { } } -/// Tracks fragments from the original operation, along with versions rebased on other subgraphs. -// XXX(@goto-bus-stop): improve/replace/reduce this structure. My notes: -// This gets cloned only in recursive query planning. Then whenever `.for_subgraph()` ends up being -// called, it always clones the `rebased_fragments` map. `.for_subgraph()` is called whenever the -// plan is turned into plan nodes by the FetchDependencyGraphToQueryPlanProcessor. -// This suggests that we can remove the Arc wrapper for `rebased_fragments` because we end up cloning the inner data anyways. -// -// This data structure is also used as an argument in several `crate::operation` functions. This -// seems wrong. The only useful method on this structure is `.for_subgraph()`, which is only used -// by the fetch dependency graph when creating plan nodes. That necessarily implies that all other -// uses of this structure only access `.original_fragments`. In that case, we should pass around -// the `NamedFragments` itself, not this wrapper structure. -// -// `.for_subgraph()` also requires a mutable reference to fill in the data. But -// `.rebased_fragments` is really a cache, so requiring a mutable reference isn't an ideal API. -// Conceptually you are just computing something and getting the result. Perhaps we can use a -// concurrent map, or prepopulate the HashMap for all subgraphs, or precompute the whole thing for -// all subgraphs (or precompute a hash map of subgraph names to OnceLocks). 
-#[derive(Clone)] -pub(crate) struct RebasedFragments { - pub(crate) original_fragments: NamedFragments, - // JS PORT NOTE: In JS implementation values were optional - /// Map key: subgraph name - rebased_fragments: Arc, NamedFragments>>, -} - -impl RebasedFragments { - pub(crate) fn new(fragments: NamedFragments) -> Self { - Self { - original_fragments: fragments, - rebased_fragments: Arc::new(HashMap::new()), - } - } - - pub(crate) fn for_subgraph( - &mut self, - subgraph_name: impl Into>, - subgraph_schema: &ValidFederationSchema, - ) -> &NamedFragments { - Arc::make_mut(&mut self.rebased_fragments) - .entry(subgraph_name.into()) - .or_insert_with(|| { - self.original_fragments - .rebase_on(subgraph_schema) - .unwrap_or_default() - }) - } -} - // Collect fragment usages from operation types. impl Selection { diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index 13e56f748f..3590c34f1e 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -59,6 +59,9 @@ use super::SelectionMapperReturn; use super::SelectionOrSet; use super::SelectionSet; use crate::error::FederationError; +use crate::operation::FragmentSpread; +use crate::operation::FragmentSpreadData; +use crate::operation::SelectionValue; use crate::schema::position::CompositeTypeDefinitionPosition; #[derive(Debug)] @@ -197,7 +200,7 @@ impl NamedFragments { // PORT_NOTE: The JS version asserts if `updated` is empty or not. But, we really want to // check the `updated` has the same set of fragments. To avoid performance hit, only the // size is checked here. - if updated.size() != self.size() { + if updated.len() != self.len() { return Err(FederationError::internal( "Unexpected change in the number of fragments", )); @@ -1133,14 +1136,14 @@ impl NamedFragments { // to further reduction. // - It is hard to avoid this chain reaction, since we need to account for the effects of // normalization. 
- let mut last_size = self.size(); + let mut last_size = self.len(); let mut last_selection_set = selection_set.clone(); while last_size > 0 { let new_selection_set = self.reduce_inner(&last_selection_set, min_usage_to_optimize)?; // Reached a fix-point => stop - if self.size() == last_size { + if self.len() == last_size { // Assumes that `new_selection_set` is the same as `last_selection_set` in this // case. break; @@ -1159,7 +1162,7 @@ impl NamedFragments { // case without additional complexity. // Prepare the next iteration - last_size = self.size(); + last_size = self.len(); last_selection_set = new_selection_set; } Ok(last_selection_set) @@ -1191,7 +1194,7 @@ impl NamedFragments { // - We take advantage of the fact that `NamedFragments` is already sorted in dependency // order. // PORT_NOTE: The `computeFragmentsToKeep` function is implemented here. - let original_size = self.size(); + let original_size = self.len(); for fragment in self.iter_rev() { let usage_count = usages.get(&fragment.name).copied().unwrap_or_default(); if usage_count >= min_usage_to_optimize { @@ -1209,7 +1212,7 @@ impl NamedFragments { }); // Short-circuiting: Nothing was dropped (fully used) => Nothing to change. - if self.size() == original_size { + if self.len() == original_size { return Ok(selection_set.clone()); } @@ -1543,6 +1546,25 @@ impl Operation { self.reuse_fragments_inner(fragments, Self::DEFAULT_MIN_USAGES_TO_OPTIMIZE) } + /// Optimize the parsed size of the operation by generating fragments based on the selections + /// in the operation. + pub(crate) fn generate_fragments(&mut self) -> Result<(), FederationError> { + // Currently, this method simply pulls out every inline fragment into a named fragment. If + // multiple inline fragments are the same, they use the same named fragment. + // + // This method can generate named fragments that are only used once. It's not ideal, but it + // also doesn't seem that bad. 
Avoiding this is possible but more work, and keeping this + // as simple as possible is a big benefit for now. + // + // When we have more advanced correctness testing, we can add more features to fragment + // generation, like factoring out partial repeated slices of selection sets or only + // introducing named fragments for patterns that occur more than once. + let mut generator = FragmentGenerator::default(); + generator.visit_selection_set(&mut self.selection_set)?; + self.named_fragments = generator.into_inner(); + Ok(()) + } + /// Used by legacy roundtrip tests. /// - This lowers `min_usages_to_optimize` to `1` in order to make it easier to write unit tests. #[cfg(test)] @@ -1573,6 +1595,155 @@ impl Operation { } } +/// Returns a consistent GraphQL name for the given index. +fn fragment_name(mut index: usize) -> Name { + /// https://spec.graphql.org/draft/#NameContinue + const NAME_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; + /// https://spec.graphql.org/draft/#NameStart + const NAME_START_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"; + + if index < NAME_START_CHARS.len() { + Name::new_static_unchecked(&NAME_START_CHARS[index..index + 1]) + } else { + let mut s = String::new(); + + let i = index % NAME_START_CHARS.len(); + s.push(NAME_START_CHARS.as_bytes()[i].into()); + index /= NAME_START_CHARS.len(); + + while index > 0 { + let i = index % NAME_CHARS.len(); + s.push(NAME_CHARS.as_bytes()[i].into()); + index /= NAME_CHARS.len(); + } + + Name::new_unchecked(&s) + } +} + +#[derive(Debug, Default)] +struct FragmentGenerator { + fragments: NamedFragments, +} + +impl FragmentGenerator { + fn next_name(&self) -> Name { + fragment_name(self.fragments.len()) + } + + /// Is a selection set worth using for a newly generated named fragment? 
+ fn is_worth_using(selection_set: &SelectionSet) -> bool { + let mut iter = selection_set.iter(); + let Some(first) = iter.next() else { + // An empty selection is not worth using (and invalid!) + return false; + }; + let Selection::Field(field) = first else { + return true; + }; + // If there's more than one selection, or one selection with a subselection, + // it's probably worth using + iter.next().is_some() || field.selection_set.is_some() + } + + /// Modify the selection set so that eligible inline fragments are moved to named fragment spreads. + fn visit_selection_set( + &mut self, + selection_set: &mut SelectionSet, + ) -> Result<(), FederationError> { + let mut new_selection_set = SelectionSet::empty( + selection_set.schema.clone(), + selection_set.type_position.clone(), + ); + + for (_key, selection) in Arc::make_mut(&mut selection_set.selections).iter_mut() { + match selection { + SelectionValue::Field(mut field) => { + if let Some(selection_set) = field.get_selection_set_mut() { + self.visit_selection_set(selection_set)?; + } + new_selection_set + .add_local_selection(&Selection::Field(Arc::clone(field.get())))?; + } + SelectionValue::FragmentSpread(frag) => { + new_selection_set + .add_local_selection(&Selection::FragmentSpread(Arc::clone(frag.get())))?; + } + SelectionValue::InlineFragment(frag) + if !Self::is_worth_using(&frag.get().selection_set) => + { + new_selection_set + .add_local_selection(&Selection::InlineFragment(Arc::clone(frag.get())))?; + } + SelectionValue::InlineFragment(mut candidate) => { + self.visit_selection_set(candidate.get_selection_set_mut())?; + + let directives = &candidate.get().inline_fragment.directives; + let skip_include = directives + .iter() + .map(|directive| match directive.name.as_str() { + "skip" | "include" => Ok(directive.clone()), + _ => Err(()), + }) + .collect::>(); + + // If there are any directives *other* than @skip and @include, + // we can't just transfer them to the generated fragment spread, + // so we 
have to keep this inline fragment. + let Ok(skip_include) = skip_include else { + new_selection_set.add_local_selection(&Selection::InlineFragment( + Arc::clone(candidate.get()), + ))?; + continue; + }; + + let existing = self.fragments.iter().find(|existing| { + existing.type_condition_position + == candidate.get().inline_fragment.casted_type() + && existing.selection_set == candidate.get().selection_set + }); + + let existing = if let Some(existing) = existing { + existing + } else { + let name = self.next_name(); + self.fragments.insert(Fragment { + schema: selection_set.schema.clone(), + name: name.clone(), + type_condition_position: candidate.get().inline_fragment.casted_type(), + directives: Default::default(), + selection_set: candidate.get().selection_set.clone(), + }); + self.fragments.get(&name).unwrap() + }; + new_selection_set.add_local_selection(&Selection::from( + FragmentSpreadSelection { + spread: FragmentSpread::new(FragmentSpreadData { + schema: selection_set.schema.clone(), + fragment_name: existing.name.clone(), + type_condition_position: existing.type_condition_position.clone(), + directives: skip_include.into(), + fragment_directives: existing.directives.clone(), + selection_id: crate::operation::SelectionId::new(), + }), + selection_set: existing.selection_set.clone(), + }, + ))?; + } + } + } + + *selection_set = new_selection_set; + + Ok(()) + } + + /// Consumes the generator and returns the fragments it generated. 
+ fn into_inner(self) -> NamedFragments { + self.fragments + } +} + //============================================================================= // Tests @@ -1600,6 +1771,13 @@ mod tests { }}; } + #[test] + fn generated_fragment_names() { + assert_eq!(fragment_name(0), "a"); + assert_eq!(fragment_name(100), "Vb"); + assert_eq!(fragment_name(usize::MAX), "oS5Uz8g3Iqw"); + } + #[test] fn duplicate_fragment_spreads_after_fragment_expansion() { // This is a regression test for FED-290, making sure `make_select` method can handle diff --git a/apollo-federation/src/operation/rebase.rs b/apollo-federation/src/operation/rebase.rs index 858d931b3f..99c770aa74 100644 --- a/apollo-federation/src/operation/rebase.rs +++ b/apollo-federation/src/operation/rebase.rs @@ -403,7 +403,7 @@ impl FragmentSpread { &self.schema, ) { Ok(FragmentSpread::new(FragmentSpreadData::from_fragment( - &named_fragment, + named_fragment, &self.directives, ))) } else { @@ -500,7 +500,7 @@ impl FragmentSpreadSelection { } let spread = FragmentSpread::new(FragmentSpreadData::from_fragment( - &named_fragment, + named_fragment, &self.spread.directives, )); Ok(FragmentSpreadSelection { @@ -1261,7 +1261,7 @@ type T { assert!(rebased_fragments.is_ok()); let rebased_fragments = rebased_fragments.unwrap(); // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. - assert_eq!(1, rebased_fragments.size()); + assert_eq!(1, rebased_fragments.len()); assert!(rebased_fragments.contains(&name!("F3"))); let rebased_fragment = rebased_fragments.fragments.get("F3").unwrap(); @@ -1337,7 +1337,7 @@ type T { assert!(rebased_fragments.is_ok()); let rebased_fragments = rebased_fragments.unwrap(); // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. 
- assert_eq!(1, rebased_fragments.size()); + assert_eq!(1, rebased_fragments.len()); assert!(rebased_fragments.contains(&name!("TheQuery"))); let rebased_fragment = rebased_fragments.fragments.get("TheQuery").unwrap(); @@ -1414,7 +1414,7 @@ type T { assert!(rebased_fragments.is_ok()); let rebased_fragments = rebased_fragments.unwrap(); // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. - assert_eq!(1, rebased_fragments.size()); + assert_eq!(1, rebased_fragments.len()); assert!(rebased_fragments.contains(&name!("TQuery"))); let rebased_fragment = rebased_fragments.fragments.get("TQuery").unwrap(); diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index d1360120c7..6d24b87285 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -29,6 +29,7 @@ use petgraph::visit::EdgeRef; use petgraph::visit::IntoNodeReferences; use serde::Serialize; +use super::query_planner::SubgraphOperationCompression; use crate::error::FederationError; use crate::error::SingleFederationError; use crate::link::graphql_definition::DeferDirectiveArguments; @@ -39,7 +40,6 @@ use crate::operation::InlineFragment; use crate::operation::InlineFragmentData; use crate::operation::InlineFragmentSelection; use crate::operation::Operation; -use crate::operation::RebasedFragments; use crate::operation::Selection; use crate::operation::SelectionId; use crate::operation::SelectionMap; @@ -2321,7 +2321,7 @@ impl FetchDependencyGraphNode { handled_conditions: &Conditions, variable_definitions: &[Node], operation_directives: &Arc, - fragments: Option<&mut RebasedFragments>, + operation_compression: &mut SubgraphOperationCompression, operation_name: Option, ) -> Result, FederationError> { if self.selection_set.selection_set.selections.is_empty() { @@ -2364,7 +2364,7 @@ impl FetchDependencyGraphNode { list }; - 
let mut operation = if self.is_entity_fetch { + let operation = if self.is_entity_fetch { operation_for_entities_fetch( subgraph_schema, selection, @@ -2382,12 +2382,8 @@ impl FetchDependencyGraphNode { &operation_name, )? }; - if let Some(fragments) = fragments - .map(|rebased| rebased.for_subgraph(self.subgraph_name.clone(), subgraph_schema)) - { - operation.reuse_fragments(fragments)?; - } - + let operation = + operation_compression.compress(&self.subgraph_name, subgraph_schema, operation)?; let operation_document = operation.try_into()?; let node = super::PlanNode::Fetch(Box::new(super::FetchNode { diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs index 66132a4ce5..e15134e4ab 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs @@ -6,8 +6,8 @@ use apollo_compiler::executable::VariableDefinition; use apollo_compiler::Name; use apollo_compiler::Node; +use super::query_planner::SubgraphOperationCompression; use crate::error::FederationError; -use crate::operation::RebasedFragments; use crate::operation::SelectionSet; use crate::query_graph::QueryGraph; use crate::query_plan::conditions::Conditions; @@ -48,7 +48,7 @@ const PIPELINING_COST: QueryPlanCost = 100.0; pub(crate) struct FetchDependencyGraphToQueryPlanProcessor { variable_definitions: Arc>>, operation_directives: Arc, - fragments: Option, + operation_compression: SubgraphOperationCompression, operation_name: Option, assigned_defer_labels: Option>, counter: u32, @@ -246,14 +246,14 @@ impl FetchDependencyGraphToQueryPlanProcessor { pub(crate) fn new( variable_definitions: Arc>>, operation_directives: Arc, - fragments: Option, + operation_compression: SubgraphOperationCompression, operation_name: Option, assigned_defer_labels: Option>, ) -> Self { Self { variable_definitions, operation_directives, - 
fragments, + operation_compression, operation_name, assigned_defer_labels, counter: 0, @@ -282,7 +282,7 @@ impl FetchDependencyGraphProcessor, DeferredDeferBlock> handled_conditions, &self.variable_definitions, &self.operation_directives, - self.fragments.as_mut(), + &mut self.operation_compression, op_name, ) } diff --git a/apollo-federation/src/query_plan/query_planner.rs b/apollo-federation/src/query_plan/query_planner.rs index 2a3cb21ef4..9e9f87c149 100644 --- a/apollo-federation/src/query_plan/query_planner.rs +++ b/apollo-federation/src/query_plan/query_planner.rs @@ -2,6 +2,7 @@ use std::cell::Cell; use std::num::NonZeroU32; use std::sync::Arc; +use apollo_compiler::collections::HashMap; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; use apollo_compiler::validation::Valid; @@ -15,7 +16,7 @@ use crate::error::SingleFederationError; use crate::link::federation_spec_definition::FederationSpecDefinition; use crate::operation::normalize_operation; use crate::operation::NamedFragments; -use crate::operation::RebasedFragments; +use crate::operation::Operation; use crate::operation::SelectionSet; use crate::query_graph::build_federated_query_graph; use crate::query_graph::path_tree::OpPathTree; @@ -368,7 +369,6 @@ impl QueryPlanner { } } - let reuse_query_fragments = self.config.reuse_query_fragments; let normalized_operation = normalize_operation( operation, NamedFragments::new(&document.fragments, &self.api_schema), @@ -433,23 +433,25 @@ impl QueryPlanner { ); }; - let rebased_fragments = if reuse_query_fragments { + let operation_compression = if self.config.generate_query_fragments { + SubgraphOperationCompression::GenerateFragments + } else if self.config.reuse_query_fragments { // For all subgraph fetches we query `__typename` on every abstract types (see // `FetchDependencyGraphNode::to_plan_node`) so if we want to have a chance to reuse // fragments, we should make sure those fragments also query `__typename` for 
every // abstract type. - Some(RebasedFragments::new( + SubgraphOperationCompression::ReuseFragments(RebasedFragments::new( normalized_operation .named_fragments .add_typename_field_for_abstract_types_in_named_fragments()?, )) } else { - None + SubgraphOperationCompression::Disabled }; let mut processor = FetchDependencyGraphToQueryPlanProcessor::new( normalized_operation.variables.clone(), normalized_operation.directives.clone(), - rebased_fragments, + operation_compression, operation.name.clone(), assigned_defer_labels, ); @@ -772,6 +774,67 @@ fn compute_plan_for_defer_conditionals( .into()) } +/// Tracks fragments from the original operation, along with versions rebased on other subgraphs. +pub(crate) struct RebasedFragments { + original_fragments: NamedFragments, + /// Map key: subgraph name + rebased_fragments: HashMap, NamedFragments>, +} + +impl RebasedFragments { + fn new(fragments: NamedFragments) -> Self { + Self { + original_fragments: fragments, + rebased_fragments: Default::default(), + } + } + + fn for_subgraph( + &mut self, + subgraph_name: impl Into>, + subgraph_schema: &ValidFederationSchema, + ) -> &NamedFragments { + self.rebased_fragments + .entry(subgraph_name.into()) + .or_insert_with(|| { + self.original_fragments + .rebase_on(subgraph_schema) + .unwrap_or_default() + }) + } +} + +pub(crate) enum SubgraphOperationCompression { + ReuseFragments(RebasedFragments), + GenerateFragments, + Disabled, +} + +impl SubgraphOperationCompression { + /// Compress a subgraph operation. 
+ pub(crate) fn compress( + &mut self, + subgraph_name: &Arc, + subgraph_schema: &ValidFederationSchema, + operation: Operation, + ) -> Result { + match self { + Self::ReuseFragments(fragments) => { + let rebased = fragments.for_subgraph(Arc::clone(subgraph_name), subgraph_schema); + let mut operation = operation; + operation.reuse_fragments(rebased)?; + Ok(operation) + } + Self::GenerateFragments => { + let mut operation = operation; + operation.generate_fragments()?; + Ok(operation) + } + Self::Disabled => Ok(operation), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs index edfdf0d972..42e13f473d 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs @@ -1,6 +1,8 @@ use apollo_federation::query_plan::query_planner::QueryPlannerConfig; const SUBGRAPH: &str = r#" + directive @custom on INLINE_FRAGMENT | FRAGMENT_SPREAD + type Query { t: T t2: T @@ -21,11 +23,9 @@ const SUBGRAPH: &str = r#" "#; #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn it_respects_generate_query_fragments_option() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); assert_plan!( @@ -48,34 +48,32 @@ fn it_respects_generate_query_fragments_option() { // Note: `... on B {}` won't be replaced, since it has only one field. @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA2_0 - ... 
on B { - z - } + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ...a + ... on B { + z } } - - fragment _generated_onA2_0 on A { - x - y - } - }, - } + } + + fragment a on A { + x + y + } + }, + } "### ); } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn it_handles_nested_fragment_generation() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); assert_plan!( @@ -102,43 +100,41 @@ fn it_handles_nested_fragment_generation() { // Note: `... on B {}` won't be replaced, since it has only one field. @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA3_0 - } + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ...b } + } - fragment _generated_onA2_0 on A { - x - y - } + fragment a on A { + x + y + } - fragment _generated_onA3_0 on A { - x - y - t { - __typename - ..._generated_onA2_0 - ... on B { - z - } + fragment b on A { + x + y + t { + __typename + ...a + ... 
on B { + z } } - }, - } - "### + } + }, + } + "### ); } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn it_handles_fragments_with_one_non_leaf_field() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); @@ -158,35 +154,102 @@ fn it_handles_fragments_with_one_non_leaf_field() { } "#, @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA1_0 + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ...a + } + } + + fragment a on A { + t { + __typename + ... on B { + z } } + } + }, + } + "### + ); +} - fragment _generated_onA1_0 on A { - t { - __typename - ... on B { - z +#[test] +fn it_migrates_skip_include() { + let planner = planner!( + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + Subgraph1: SUBGRAPH, + ); + assert_plan!( + &planner, + r#" + query ($var: Boolean!) { + t { + ... on A { + x + y + t { + ... on A @include(if: $var) { + x + y + } + ... on A @skip(if: $var) { + x + y + } + ... on A @custom { + x + y + } } } } - }, - } - "### + } + "#, + + // Note: `... on A @custom {}` won't be replaced, since it has a custom directive. Even + // though it also supports being used on a named fragment spread, we cannot assume that + // the behaviour is exactly the same. + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ...b + } + } + + fragment a on A { + x + y + } + + fragment b on A { + x + y + t { + __typename + ...a @include(if: $var) + ...a @skip(if: $var) + ... 
on A @custom { + x + y + } + } + } + }, + } + "### ); } - #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn it_identifies_and_reuses_equivalent_fragments_that_arent_identical() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); assert_plan!( @@ -208,35 +271,33 @@ fn it_identifies_and_reuses_equivalent_fragments_that_arent_identical() { } "#, @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA2_0 - } - t2 { - __typename - ..._generated_onA2_0 - } + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ...a } - - fragment _generated_onA2_0 on A { - x - y + t2 { + __typename + ...a } - }, - } + } + + fragment a on A { + x + y + } + }, + } "### ); } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); assert_plan!( @@ -258,30 +319,30 @@ fn fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment } "#, @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA2_0 - } - t2 { - __typename - ..._generated_onA2_1 - } + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ...a } - - fragment _generated_onA2_0 on A { - x - y + t2 { + __typename + ...b } + } - fragment _generated_onA2_1 on A { - y - z - } - }, - } 
+ fragment a on A { + x + y + } + + fragment b on A { + y + z + } + }, + } "### ); } diff --git a/apollo-federation/tests/query_plan/supergraphs/fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions.graphql b/apollo-federation/tests/query_plan/supergraphs/fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_fragments_with_one_non_leaf_field.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_fragments_with_one_non_leaf_field.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_handles_fragments_with_one_non_leaf_field.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_fragments_with_one_non_leaf_field.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/it_identifies_and_reuses_equivalent_fragments_that_arent_identical.graphql b/apollo-federation/tests/query_plan/supergraphs/it_identifies_and_reuses_equivalent_fragments_that_arent_identical.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_identifies_and_reuses_equivalent_fragments_that_arent_identical.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_identifies_and_reuses_equivalent_fragments_that_arent_identical.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/it_migrates_skip_include.graphql b/apollo-federation/tests/query_plan/supergraphs/it_migrates_skip_include.graphql new file mode 100644 index 0000000000..eaf9c3e94d --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/it_migrates_skip_include.graphql @@ -0,0 +1,71 @@ +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) 
repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +type A + @join__type(graph: SUBGRAPH1) +{ + x: Int + y: Int + z: Int + t: T +} + +type B + @join__type(graph: SUBGRAPH1) +{ + z: Int +} + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) +{ + t: T + t2: T +} + +union T + @join__type(graph: SUBGRAPH1) + @join__unionMember(graph: SUBGRAPH1, member: "A") + @join__unionMember(graph: SUBGRAPH1, member: "B") + = A | B diff --git a/apollo-federation/tests/query_plan/supergraphs/it_respects_generate_query_fragments_option.graphql b/apollo-federation/tests/query_plan/supergraphs/it_respects_generate_query_fragments_option.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_respects_generate_query_fragments_option.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_respects_generate_query_fragments_option.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION From 0f6c7a71194f5fe2d7536e3247450303582916f0 Mon Sep 17 00:00:00 2001 From: Dariusz Kuc <9501705+dariuszkuc@users.noreply.github.com> Date: Thu, 8 Aug 2024 09:48:28 -0500 Subject: [PATCH 042/108] fix(federation): misc fixes (#5766) Fixes * Fix handling of `SubgraphEnteringTransition` which should skip `RootTypeResolution` edges. This fixes the issue with redundantly processing same edges over and over again. * Fix handling of `KeyResolution` which should compare whole selection sets of the conditions vs just the selections. * Ensures consistent order of child nodes when computing new key nodes * Add missing `self.on_modification()` when removing redundant edges * Returns false instead of throwing an error when calculating if new requires node is useless (i.e. 
return false if parent relation path is `None`) * Fix `PlanBuilder` trait to no longer swallow exceptions from query planning Co-authored-by: Duckki Oe --- .../src/query_graph/build_query_graph.rs | 5 +- .../src/query_plan/fetch_dependency_graph.rs | 54 +++- apollo-federation/src/query_plan/generate.rs | 12 +- .../query_plan/query_planning_traversal.rs | 20 +- .../build_query_plan_tests/requires.rs | 285 ++++++++++++++++++ ...ple_requires_with_multiple_fetches.graphql | 130 ++++++++ 6 files changed, 477 insertions(+), 29 deletions(-) create mode 100644 apollo-federation/tests/query_plan/supergraphs/it_handles_multiple_requires_with_multiple_fetches.graphql diff --git a/apollo-federation/src/query_graph/build_query_graph.rs b/apollo-federation/src/query_graph/build_query_graph.rs index 929e4b8d0e..38b2232dcc 100644 --- a/apollo-federation/src/query_graph/build_query_graph.rs +++ b/apollo-federation/src/query_graph/build_query_graph.rs @@ -1902,7 +1902,8 @@ impl FederatedQueryGraphBuilder { } .into()); }; - if conditions.selections == followup_conditions.selections { + + if conditions == followup_conditions { continue; } } @@ -1926,7 +1927,7 @@ impl FederatedQueryGraphBuilder { // since we can do "start of query" -> C and that's always better. if matches!( followup_edge_weight.transition, - QueryGraphEdgeTransition::SubgraphEnteringTransition + QueryGraphEdgeTransition::RootTypeResolution { .. } ) { continue; } diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index 6d24b87285..40961a8e5a 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -668,7 +668,9 @@ impl FetchDependencyGraph { // keeping nodes separated when they have a different path in their parent // allows to keep that "path in parent" more precisely, // which is important for some case of @requires). 
- for existing_id in self.children_of(parent.parent_node_id) { + for existing_id in + FetchDependencyGraph::sorted_nodes(self.children_of(parent.parent_node_id)) + { let existing = self.node_weight(existing_id)?; // we compare the subgraph names last because on average it improves performance if existing.merge_at.as_deref() == Some(merge_at) @@ -888,6 +890,27 @@ impl FetchDependencyGraph { }) } + /// By default, petgraph iterates over the nodes in the order of their node indices, but if + /// we retrieve node iterator based on the edges (e.g. children of/parents of), then resulting + /// iteration order is unspecified. In practice, it appears that edges are iterated in the + /// *reverse* iteration order. + /// + /// Since this behavior can affect the query plans, we can use this method to explicitly sort + /// the iterator to ensure we consistently follow the node index order. + /// + /// NOTE: In JS implementation, whenever we remove/merge nodes, we always shift left remaining + /// nodes so there are no gaps in the node IDs and the newly created nodes are always created + /// with the largest IDs. RS implementation has different behavior - whenever nodes are removed, + /// their IDs are later reused by petgraph so we no longer have guarantee that node with the + /// largest ID is the last node that was created. Due to the above, sorting by node IDs may still + /// result in different iteration order than the JS code, but in practice might be enough to + /// ensure correct plans. + fn sorted_nodes<'graph>( + nodes: impl Iterator + 'graph, + ) -> impl Iterator + 'graph { + nodes.sorted_by_key(|n| n.index()) + } + fn type_for_fetch_inputs( &self, type_name: &Name, @@ -898,7 +921,13 @@ impl FetchDependencyGraph { .try_into()?) } - /// Find redundant edges coming out of a node. See `remove_redundant_edges`. + /// Find redundant edges coming out of a node. See `remove_redundant_edges`. 
This method assumes + /// that the underlying graph does not have any cycles between nodes. + /// + /// PORT NOTE: JS implementation performs in-place removal of edges when finding the redundant + /// edges. In RS implementation we first collect the edges and then remove them. This has a side + /// effect that if we ever end up with a cycle in a graph (which is an invalid state), this method + /// may result in infinite loop. fn collect_redundant_edges(&self, node_index: NodeIndex, acc: &mut HashSet) { let mut stack = vec![]; for start_index in self.children_of(node_index) { @@ -907,7 +936,6 @@ impl FetchDependencyGraph { for edge in self.graph.edges_connecting(node_index, v) { acc.insert(edge.id()); } - stack.extend(self.children_of(v)); } } @@ -921,6 +949,9 @@ impl FetchDependencyGraph { let mut redundant_edges = HashSet::new(); self.collect_redundant_edges(node_index, &mut redundant_edges); + if !redundant_edges.is_empty() { + self.on_modification(); + } for edge in redundant_edges { self.graph.remove_edge(edge); } @@ -979,9 +1010,11 @@ impl FetchDependencyGraph { self.collect_redundant_edges(node_index, &mut redundant_edges); } - for edge in redundant_edges { - // PORT_NOTE: JS version calls `FetchGroup.removeChild`, which calls onModification. + // PORT_NOTE: JS version calls `FetchGroup.removeChild`, which calls onModification. 
+ if !redundant_edges.is_empty() { self.on_modification(); + } + for edge in redundant_edges { self.graph.remove_edge(edge); } @@ -2123,18 +2156,17 @@ impl FetchDependencyGraph { let node = self.node_weight(node_id)?; let parent = self.node_weight(parent_relation.parent_node_id)?; let Some(parent_op_path) = &parent_relation.path_in_parent else { - return Err(FederationError::internal("Parent operation path is empty")); + return Ok(false); }; let type_at_path = self.type_at_path( &parent.selection_set.selection_set.type_position, &parent.selection_set.selection_set.schema, parent_op_path, )?; - let new_node_is_unneeded = parent_relation.path_in_parent.is_some() - && node - .selection_set - .selection_set - .can_rebase_on(&type_at_path, &parent.selection_set.selection_set.schema)?; + let new_node_is_unneeded = node + .selection_set + .selection_set + .can_rebase_on(&type_at_path, &parent.selection_set.selection_set.schema)?; Ok(new_node_is_unneeded) } diff --git a/apollo-federation/src/query_plan/generate.rs b/apollo-federation/src/query_plan/generate.rs index 0511deb498..4001b70f6f 100644 --- a/apollo-federation/src/query_plan/generate.rs +++ b/apollo-federation/src/query_plan/generate.rs @@ -19,7 +19,7 @@ struct Partial { // that implements all three methods. pub trait PlanBuilder { /// `add_to_plan`: how to obtain a new plan by taking some plan and adding a new element to it. - fn add_to_plan(&mut self, plan: &Plan, elem: Element) -> Plan; + fn add_to_plan(&mut self, plan: &Plan, elem: Element) -> Result; /// `compute_plan_cost`: how to compute the cost of a plan. 
fn compute_plan_cost(&mut self, plan: &mut Plan) -> Result; @@ -158,7 +158,7 @@ where let picked_index = pick_next(index, next_choices); let Extracted { extracted, is_last } = extract(picked_index, next_choices); - let mut new_partial_plan = plan_builder.add_to_plan(&partial_plan, extracted); + let mut new_partial_plan = plan_builder.add_to_plan(&partial_plan, extracted)?; let cost = plan_builder.compute_plan_cost(&mut new_partial_plan)?; if !is_last { @@ -252,7 +252,11 @@ mod tests { } impl<'a> PlanBuilder for TestPlanBuilder<'a> { - fn add_to_plan(&mut self, partial_plan: &Plan, new_element: Element) -> Plan { + fn add_to_plan( + &mut self, + partial_plan: &Plan, + new_element: Element, + ) -> Result { let new_plan: Plan = partial_plan .iter() .cloned() @@ -261,7 +265,7 @@ mod tests { if new_plan.len() == self.target_len { self.generated.push(new_plan.clone()) } - new_plan + Ok(new_plan) } fn compute_plan_cost(&mut self, plan: &mut Plan) -> Result { diff --git a/apollo-federation/src/query_plan/query_planning_traversal.rs b/apollo-federation/src/query_plan/query_planning_traversal.rs index edfdca8fa8..2dab41debf 100644 --- a/apollo-federation/src/query_plan/query_planning_traversal.rs +++ b/apollo-federation/src/query_plan/query_planning_traversal.rs @@ -1063,21 +1063,17 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { } impl<'a: 'b, 'b> PlanBuilder> for QueryPlanningTraversal<'a, 'b> { - fn add_to_plan(&mut self, plan_info: &PlanInfo, tree: Arc) -> PlanInfo { + fn add_to_plan( + &mut self, + plan_info: &PlanInfo, + tree: Arc, + ) -> Result { let mut updated_graph = plan_info.fetch_dependency_graph.clone(); - let result = self.updated_dependency_graph(&mut updated_graph, &tree); - if result.is_ok() { - PlanInfo { + self.updated_dependency_graph(&mut updated_graph, &tree) + .map(|_| PlanInfo { fetch_dependency_graph: updated_graph, path_tree: plan_info.path_tree.merge(&tree), - } - } else { - // Failed to update. Return the original plan. 
- PlanInfo { - fetch_dependency_graph: updated_graph, - path_tree: plan_info.path_tree.clone(), - } - } + }) } fn compute_plan_cost( diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs index 2b270770e1..d33ef66191 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs @@ -1453,3 +1453,288 @@ fn it_require_of_multiple_field_when_one_is_also_a_key_to_reach_another() { "### ); } + +#[test] +fn it_handles_multiple_requires_with_multiple_fetches() { + let planner = planner!( + s1: r#" + type Query { + t: T + } + + interface I { + id: ID! + name: String! + } + + type T implements I @key(fields: "id") { + id: ID! + name: String! @shareable + x: X @shareable + v: V @shareable + } + + type U implements I @key(fields: "id") { + id: ID! + name: String! @external + } + + type V @key(fields: "id") @key(fields: "internalID") { + id: ID! + internalID: ID! + } + + type X @key(fields: "t { id }") { + t: T! + isX: Boolean! + } + "#, + s2: r#" + type V @key(fields: "id") { + id: ID! + internalID: ID! @shareable + y: Y! @shareable + zz: [Z!] @external + } + + type Z { + u: U! @external + } + + type Y @key(fields: "id") { + id: ID! + isY: Boolean! @external + } + + interface I { + id: ID! + name: String! + } + + type T implements I @key(fields: "id") { + id: ID! + name: String! @external + x: X @external + v: V @external + foo: [String!]! @requires(fields: "x { isX }\nv { y { isY } }") + bar: [I!]! @requires(fields: "x { isX }\nv { y { isY } zz { u { id } } }") + } + + type X { + isX: Boolean! @external + } + + type U implements I @key(fields: "id") { + id: ID! + name: String! @external + } + "#, + s3: r#" + type V @key(fields: "internalID") { + internalID: ID! + y: Y! @shareable + } + + type Y @key(fields: "id") { + id: ID! + isY: Boolean! 
+ } + "#, + s4: r#" + type V @key(fields: "id") @key(fields: "internalID") { + id: ID! + internalID: ID! + zz: [Z!] @override(from: "s1") + } + + type Z { + free: Boolean + u: U! + v: V! + } + + interface I { + id: ID! + name: String! + } + + type T implements I @key(fields: "id") { + id: ID! + name: String! @shareable + x: X @shareable + v: V @shareable + } + + type X @key(fields: "t { id }", resolvable: false) { + t: T! @external + } + + type U implements I @key(fields: "id") { + id: ID! + name: String! @override(from: "s1") + } + "#, + ); + assert_plan!( + &planner, + r#" + { + t { + foo + bar { + name + } + } + } + "#, + + @r###" + QueryPlan { + Sequence { + Fetch(service: "s1") { + { + t { + __typename + id + x { + isX + } + v { + __typename + internalID + } + } + } + }, + Flatten(path: "t") { + Fetch(service: "s4") { + { + ... on T { + __typename + id + } + } => + { + ... on T { + v { + __typename + internalID + zz { + u { + id + } + } + } + } + } + }, + }, + Flatten(path: "t.v") { + Fetch(service: "s3") { + { + ... on V { + __typename + internalID + } + } => + { + ... on V { + y { + isY + } + } + } + }, + }, + Parallel { + Sequence { + Flatten(path: "t") { + Fetch(service: "s2") { + { + ... on T { + __typename + x { + isX + } + v { + y { + isY + } + zz { + u { + id + } + } + } + id + } + } => + { + ... on T { + bar { + __typename + ... on T { + __typename + id + } + ... on U { + __typename + id + } + } + } + } + }, + }, + Flatten(path: "t.bar.@") { + Fetch(service: "s4") { + { + ... on T { + __typename + id + } + ... on U { + __typename + id + } + } => + { + ... on T { + name + } + ... on U { + name + } + } + }, + }, + }, + Flatten(path: "t") { + Fetch(service: "s2") { + { + ... on T { + __typename + x { + isX + } + v { + y { + isY + } + } + id + } + } => + { + ... 
on T { + foo + } + } + }, + }, + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_multiple_requires_with_multiple_fetches.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_multiple_requires_with_multiple_fetches.graphql new file mode 100644 index 0000000000..8e4d26a88e --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_multiple_requires_with_multiple_fetches.graphql @@ -0,0 +1,130 @@ +# Composed from subgraphs with hash: 461e4a611a1faf2558d6ee6e3de4af24a043fc16 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +interface I + @join__type(graph: S1) + @join__type(graph: S2) + @join__type(graph: S4) +{ + id: ID! + name: String! 
+} + +scalar join__FieldSet + +enum join__Graph { + S1 @join__graph(name: "s1", url: "none") + S2 @join__graph(name: "s2", url: "none") + S3 @join__graph(name: "s3", url: "none") + S4 @join__graph(name: "s4", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: S1) + @join__type(graph: S2) + @join__type(graph: S3) + @join__type(graph: S4) +{ + t: T @join__field(graph: S1) +} + +type T implements I + @join__implements(graph: S1, interface: "I") + @join__implements(graph: S2, interface: "I") + @join__implements(graph: S4, interface: "I") + @join__type(graph: S1, key: "id") + @join__type(graph: S2, key: "id") + @join__type(graph: S4, key: "id") +{ + id: ID! + name: String! @join__field(graph: S1) @join__field(graph: S2, external: true) @join__field(graph: S4) + x: X @join__field(graph: S1) @join__field(graph: S2, external: true) @join__field(graph: S4) + v: V @join__field(graph: S1) @join__field(graph: S2, external: true) @join__field(graph: S4) + foo: [String!]! @join__field(graph: S2, requires: "x { isX }\nv { y { isY } }") + bar: [I!]! @join__field(graph: S2, requires: "x { isX }\nv { y { isY } zz { u { id } } }") +} + +type U implements I + @join__implements(graph: S1, interface: "I") + @join__implements(graph: S2, interface: "I") + @join__implements(graph: S4, interface: "I") + @join__type(graph: S1, key: "id") + @join__type(graph: S2, key: "id") + @join__type(graph: S4, key: "id") +{ + id: ID! + name: String! 
@join__field(graph: S1, external: true) @join__field(graph: S2, external: true) @join__field(graph: S4, override: "s1") +} + +type V + @join__type(graph: S1, key: "id") + @join__type(graph: S1, key: "internalID") + @join__type(graph: S2, key: "id") + @join__type(graph: S3, key: "internalID") + @join__type(graph: S4, key: "id") + @join__type(graph: S4, key: "internalID") +{ + id: ID! @join__field(graph: S1) @join__field(graph: S2) @join__field(graph: S4) + internalID: ID! + y: Y! @join__field(graph: S2) @join__field(graph: S3) + zz: [Z!] @join__field(graph: S2, external: true) @join__field(graph: S4, override: "s1") +} + +type X + @join__type(graph: S1, key: "t { id }") + @join__type(graph: S2) + @join__type(graph: S4, key: "t { id }", resolvable: false) +{ + t: T! @join__field(graph: S1) @join__field(graph: S4, external: true) + isX: Boolean! @join__field(graph: S1) @join__field(graph: S2, external: true) +} + +type Y + @join__type(graph: S2, key: "id") + @join__type(graph: S3, key: "id") +{ + id: ID! + isY: Boolean! @join__field(graph: S2, external: true) @join__field(graph: S3) +} + +type Z + @join__type(graph: S2) + @join__type(graph: S4) +{ + u: U! @join__field(graph: S2, external: true) @join__field(graph: S4) + free: Boolean @join__field(graph: S4) + v: V! 
@join__field(graph: S4) +} From 31a99d7ae0aa34aacfd290aad1b905639ac226e7 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Fri, 9 Aug 2024 11:38:29 +0200 Subject: [PATCH 043/108] move plugin creation to a function (#5786) This reduces the compiled size of the `create_plugins` function, one of our largest ones for now --- apollo-router/src/router_factory.rs | 66 ++++++++++++++++++++--------- 1 file changed, 46 insertions(+), 20 deletions(-) diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 9c3388a963..37bdbc5e00 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -532,6 +532,40 @@ caused by ); } +#[allow(clippy::too_many_arguments)] +pub(crate) async fn add_plugin( + name: String, + factory: &PluginFactory, + plugin_config: &Value, + schema: Arc, + supergraph_schema: Arc>, + subgraph_schemas: Arc>>>, + notify: &crate::notification::Notify, + plugin_instances: &mut Plugins, + errors: &mut Vec, +) { + match factory + .create_instance( + PluginInit::builder() + .config(plugin_config.clone()) + .supergraph_sdl(schema) + .supergraph_schema(supergraph_schema) + .subgraph_schemas(subgraph_schemas) + .notify(notify.clone()) + .build(), + ) + .await + { + Ok(plugin) => { + let _ = plugin_instances.insert(name, plugin); + } + Err(err) => errors.push(ConfigurationError::PluginConfiguration { + plugin: name, + error: err.to_string(), + }), + } +} + pub(crate) async fn create_plugins( configuration: &Configuration, schema: &Schema, @@ -565,26 +599,18 @@ pub(crate) async fn create_plugins( // Use function-like macros to avoid borrow conflicts of captures macro_rules! 
add_plugin { ($name: expr, $factory: expr, $plugin_config: expr) => {{ - match $factory - .create_instance( - PluginInit::builder() - .config($plugin_config) - .supergraph_sdl(schema.as_string().clone()) - .supergraph_schema(supergraph_schema.clone()) - .subgraph_schemas(subgraph_schemas.clone()) - .notify(configuration.notify.clone()) - .build(), - ) - .await - { - Ok(plugin) => { - let _ = plugin_instances.insert($name, plugin); - } - Err(err) => errors.push(ConfigurationError::PluginConfiguration { - plugin: $name, - error: err.to_string(), - }), - } + add_plugin( + $name, + $factory, + &$plugin_config, + schema.as_string().clone(), + supergraph_schema.clone(), + subgraph_schemas.clone(), + &configuration.notify.clone(), + &mut plugin_instances, + &mut errors, + ) + .await; }}; } From 5421580ead8b2351b54d57d51be07ecb426335e9 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Fri, 9 Aug 2024 17:46:19 +0200 Subject: [PATCH 044/108] Add `experimental_query_planner_mode: both_best_effort` config (#5790) --- apollo-router/src/configuration/mod.rs | 22 +++++- ...nfiguration__tests__schema_generation.snap | 11 ++- .../src/query_planner/bridge_query_planner.rs | 20 ++++++ .../query_planner/caching_query_planner.rs | 4 ++ apollo-router/src/router_factory.rs | 3 +- apollo-router/tests/integration/lifecycle.rs | 70 +++++++++++++++++++ 6 files changed, 124 insertions(+), 6 deletions(-) diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index e3f5ee071e..2559b69458 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -218,16 +218,32 @@ pub(crate) enum ApolloMetricsGenerationMode { /// Query planner modes. #[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] #[derivative(Debug)] -#[serde(rename_all = "lowercase")] +#[serde(rename_all = "snake_case")] pub(crate) enum QueryPlannerMode { /// Use the new Rust-based implementation. 
+ /// + /// Raises an error at Router startup if the the new planner does not support the schema + /// (such as using legacy Apollo Federation 1) New, /// Use the old JavaScript-based implementation. #[default] Legacy, - /// Use Rust-based and Javascript-based implementations side by side, logging warnings if the - /// implementations disagree. + /// Use primarily the Javascript-based implementation, + /// but also schedule background jobs to run the Rust implementation and compare results, + /// logging warnings if the implementations disagree. + /// + /// Raises an error at Router startup if the the new planner does not support the schema + /// (such as using legacy Apollo Federation 1) Both, + /// Use primarily the Javascript-based implementation, + /// but also schedule on a best-effort basis background jobs + /// to run the Rust implementation and compare results, + /// logging warnings if the implementations disagree. + /// + /// Falls back to `legacy` with a warning + /// if the the new planner does not support the schema + /// (such as using legacy Apollo Federation 1) + BothBestEffort, } impl<'de> serde::Deserialize<'de> for Configuration { diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 9b6dabdb82..74294b07ba 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -4386,7 +4386,7 @@ expression: "&schema" "description": "Query planner modes.", "oneOf": [ { - "description": "Use the new Rust-based implementation.", + "description": "Use the new Rust-based implementation.\n\nRaises an error at Router startup if the the new planner does not support the schema (such as using legacy Apollo Federation 1)", "enum": [ "new" 
], @@ -4400,11 +4400,18 @@ expression: "&schema" "type": "string" }, { - "description": "Use Rust-based and Javascript-based implementations side by side, logging warnings if the implementations disagree.", + "description": "Use primarily the Javascript-based implementation, but also schedule background jobs to run the Rust implementation and compare results, logging warnings if the implementations disagree.\n\nRaises an error at Router startup if the the new planner does not support the schema (such as using legacy Apollo Federation 1)", "enum": [ "both" ], "type": "string" + }, + { + "description": "Use primarily the Javascript-based implementation, but also schedule on a best-effort basis background jobs to run the Rust implementation and compare results, logging warnings if the implementations disagree.\n\nFalls back to `legacy` with a warning if the the new planner does not support the schema (such as using legacy Apollo Federation 1)", + "enum": [ + "both_best_effort" + ], + "type": "string" } ] }, diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index f5772b4cbf..1afbb60888 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -137,6 +137,16 @@ impl PlannerMode { "expected Rust QP instance for `experimental_query_planner_mode: both`", ), }, + QueryPlannerMode::BothBestEffort => { + if let Some(rust) = rust_planner { + Self::Both { + js: Self::js(&schema.raw_sdl, configuration, old_planner).await?, + rust, + } + } else { + Self::Js(Self::js(&schema.raw_sdl, configuration, old_planner).await?) 
+ } + } }) } @@ -149,6 +159,16 @@ impl PlannerMode { QueryPlannerMode::New | QueryPlannerMode::Both => { Ok(Some(Self::rust(schema, configuration)?)) } + QueryPlannerMode::BothBestEffort => match Self::rust(schema, configuration) { + Ok(planner) => Ok(Some(planner)), + Err(error) => { + tracing::warn!( + "Failed to initialize the new query planner, \ + falling back to legacy: {error}" + ); + Ok(None) + } + }, } } diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index f6718143ec..20b2f75342 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -62,6 +62,7 @@ pub(crate) enum ConfigMode { // for now use the JS config as it expected to be identical to the Rust one Rust(Arc), Both(Arc), + BothBestEffort(Arc), Js(Arc), } @@ -135,6 +136,9 @@ where crate::configuration::QueryPlannerMode::Both => { ConfigMode::Both(Arc::new(configuration.js_query_planner_config())) } + crate::configuration::QueryPlannerMode::BothBestEffort => { + ConfigMode::BothBestEffort(Arc::new(configuration.js_query_planner_config())) + } }; Ok(Self { cache, diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 37bdbc5e00..fa2d8aab66 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -868,7 +868,8 @@ fn can_use_with_experimental_query_planner( Ok(()) } - crate::configuration::QueryPlannerMode::Legacy => Ok(()), + crate::configuration::QueryPlannerMode::Legacy + | crate::configuration::QueryPlannerMode::BothBestEffort => Ok(()), } } #[cfg(test)] diff --git a/apollo-router/tests/integration/lifecycle.rs b/apollo-router/tests/integration/lifecycle.rs index 71af2dbcf8..2f7feea952 100644 --- a/apollo-router/tests/integration/lifecycle.rs +++ b/apollo-router/tests/integration/lifecycle.rs @@ -460,3 +460,73 @@ fn test_plugin_ordering_push_trace(context: &Context, entry: 
String) { ) .unwrap(); } + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_legacy_qp() { + let mut router = IntegrationTest::builder() + .config("experimental_query_planner_mode: legacy") + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_new_qp() { + let mut router = IntegrationTest::builder() + .config("experimental_query_planner_mode: new") + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + The supergraph schema failed to produce a valid API schema: \ + Supergraphs composed with federation version 1 are not supported.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_both_qp() { + let mut router = IntegrationTest::builder() + .config("experimental_query_planner_mode: both") + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + The supergraph schema failed to produce a valid API schema: \ + Supergraphs composed with federation version 1 are not supported.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_both_best_effort_qp() { + let mut router = IntegrationTest::builder() + .config("experimental_query_planner_mode: both_best_effort") + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "Failed to initialize the new query planner, falling back to legacy: \ + The supergraph schema failed to produce a valid API schema: \ + Supergraphs composed with federation version 1 are not supported. 
\ + Please recompose your supergraph with federation version 2 or greater", + ) + .await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} From d30ff2f1b0188616084673a3d22f32c9b7c503d1 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Mon, 12 Aug 2024 09:26:09 +0200 Subject: [PATCH 045/108] add the ability for gt/lt conditions to parse the string selector to number (#5758) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .../fix_bnjjj_improve_gt_lt_conditions.md | 21 +++++++ apollo-router/src/plugins/telemetry/config.rs | 12 ++++ .../telemetry/config_new/conditions.rs | 60 ++++++++++++++----- 3 files changed, 77 insertions(+), 16 deletions(-) create mode 100644 .changesets/fix_bnjjj_improve_gt_lt_conditions.md diff --git a/.changesets/fix_bnjjj_improve_gt_lt_conditions.md b/.changesets/fix_bnjjj_improve_gt_lt_conditions.md new file mode 100644 index 0000000000..bd7f0fc99e --- /dev/null +++ b/.changesets/fix_bnjjj_improve_gt_lt_conditions.md @@ -0,0 +1,21 @@ +### Add the ability for `gt`/`lt` conditions to parse the string selector to number ([PR #5758](https://github.com/apollographql/router/pull/5758)) + +This will enable the ability to have gt/lt conditions on header selectors for example, if you want to put a specific attribute on a span if the `content-length` header is greater than 100: + +```yaml +telemetry: + instrumentation: + spans: + mode: spec_compliant + router: + attributes: + trace_id: true + payload_is_to_big: # Set this attribute to true if the value of content-length header is > than 100 + static: true + condition: + gt: + - request_header: "content-length" + - 100 +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5758 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 59eee4939a..42c6090b3b 100644 --- 
a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -430,6 +430,18 @@ pub(crate) enum AttributeValue { Array(AttributeArray), } +impl AttributeValue { + pub(crate) fn as_f64(&self) -> Option { + match self { + AttributeValue::Bool(_) => None, + AttributeValue::I64(v) => Some(*v as f64), + AttributeValue::F64(v) => Some(*v), + AttributeValue::String(v) => v.parse::().ok(), + AttributeValue::Array(_) => None, + } + } +} + impl From for AttributeValue { fn from(value: String) -> Self { AttributeValue::String(value) diff --git a/apollo-router/src/plugins/telemetry/config_new/conditions.rs b/apollo-router/src/plugins/telemetry/config_new/conditions.rs index f4782a3c13..d3c44610a0 100644 --- a/apollo-router/src/plugins/telemetry/config_new/conditions.rs +++ b/apollo-router/src/plugins/telemetry/config_new/conditions.rs @@ -89,15 +89,26 @@ where gt[1] = SelectorOrValue::Value(r); None } - (Some(l), Some(r)) => { - if l > r { - *self = Condition::True; - Some(true) - } else { - *self = Condition::False; - Some(false) + (Some(l), Some(r)) => match (l.as_f64(), r.as_f64()) { + (Some(l), Some(r)) => { + if l > r { + *self = Condition::True; + Some(true) + } else { + *self = Condition::False; + Some(false) + } } - } + _ => { + if l > r { + *self = Condition::True; + Some(true) + } else { + *self = Condition::False; + Some(false) + } + } + }, } } Condition::Lt(lt) => { @@ -113,15 +124,26 @@ where lt[1] = SelectorOrValue::Value(r); None } - (Some(l), Some(r)) => { - if l < r { - *self = Condition::True; - Some(true) - } else { - *self = Condition::False; - Some(false) + (Some(l), Some(r)) => match (l.as_f64(), r.as_f64()) { + (Some(l), Some(r)) => { + if l < r { + *self = Condition::True; + Some(true) + } else { + *self = Condition::False; + Some(false) + } } - } + _ => { + if l < r { + *self = Condition::True; + Some(true) + } else { + *self = Condition::False; + Some(false) + } + } + }, } } Condition::Exists(exist) => { @@ 
-577,6 +599,7 @@ mod test { #[test] fn test_condition_gt() { + test_gt("2", "1", "1"); test_gt(2, 1, 1); test_gt(2.0, 1.0, 1.0); test_gt("b", "a", "a"); @@ -604,8 +627,10 @@ mod test { #[test] fn test_condition_lt() { + test_lt("1", "2", "2"); test_lt(1, 2, 2); test_lt(1.0, 2.0, 2.0); + test_lt("1.0", "2.0", "2.0"); test_lt("a", "b", "b"); assert_eq!(lt(true, false).req(None), Some(false)); assert_eq!(lt(false, true).req(None), Some(true)); @@ -707,6 +732,8 @@ mod test { assert_eq!(gt(Req, 1).req(Some(2i64)), Some(true)); assert_eq!(gt(Req, 1).req(None), None); + assert_eq!(gt("2", Req).req(Some(1i64)), Some(true)); + assert_eq!(gt("2.1", Req).req(Some(1i64)), Some(true)); assert_eq!(gt(2, Req).req(Some(1i64)), Some(true)); assert_eq!(gt(2, Req).req(None), None); assert_eq!(gt(Req, Req).req(Some(1i64)), Some(false)); @@ -745,6 +772,7 @@ mod test { assert_eq!(lt(2, 1).evaluate_drop(), Some(false)); assert_eq!(lt(Static(1), 2).evaluate_drop(), Some(true)); assert_eq!(lt(2, Static(1)).evaluate_drop(), Some(false)); + assert_eq!(gt("2", "1").evaluate_drop(), Some(true)); assert_eq!(gt(2, 1).evaluate_drop(), Some(true)); assert_eq!(gt(1, 2).evaluate_drop(), Some(false)); assert_eq!(gt(Static(2), 1).evaluate_drop(), Some(true)); From b62865e9c11e8dd9d2721264cc819a30e36d4b2c Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Mon, 12 Aug 2024 02:30:28 -0700 Subject: [PATCH 046/108] fix(federation): fixed a QP panic saying "would create a cycle" (#5797) --- .../src/query_plan/fetch_dependency_graph.rs | 4 +- .../query_plan/build_query_plan_tests.rs | 194 ++++++++++++++++++ .../build_query_plan_tests/requires.rs | 46 ++--- ...te_cycle_in_fetch_dependency_graph.graphql | 94 +++++++++ 4 files changed, 314 insertions(+), 24 deletions(-) create mode 100644 apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph.graphql diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs 
b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index 40961a8e5a..93d418606d 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -1451,7 +1451,9 @@ impl FetchDependencyGraph { // subgraph name, but have no worries for `mergeAt` since it contains either number of // field names, and the later is restricted by graphQL so as to not be an issue. let mut by_subgraphs = MultiMap::new(); - for node_index in self.graph.node_indices() { + let sorted_nodes = petgraph::algo::toposort(&self.graph, None) + .map_err(|_| FederationError::internal("Failed to sort nodes due to cycle(s)"))?; + for node_index in sorted_nodes { let node = self.node_weight(node_index)?; // We exclude nodes without inputs because that's what we look for. In practice, this // mostly just exclude root nodes, which we don't really want to bother with anyway. diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests.rs b/apollo-federation/tests/query_plan/build_query_plan_tests.rs index e849bf74c2..a5a8769c2a 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests.rs @@ -720,3 +720,197 @@ fn defer_gets_stripped_out() { ); assert_eq!(plan_one, plan_two) } + +#[test] +fn test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph() { + // This is a test for ROUTER-546 (the second part). + let planner = planner!( + S: r#" + type Query { + start: T! + } + + type T @key(fields: "id") { + id: String! + } + "#, + A: r#" + type T @key(fields: "id") { + id: String! @shareable + u: U! @shareable + } + + type U @key(fields: "id") { + id: ID! + a: String! @shareable + b: String @shareable + } + "#, + B: r#" + type T @key(fields: "id") { + id: String! @external + u: U! @shareable + } + + type U @key(fields: "id") { + id: ID! + a: String! @shareable + # Note: b is not here. + } + + # This definition is necessary. 
+ extend type W @key(fields: "id") { + id: ID @external + } + "#, + C: r#" + extend type U @key(fields: "id") { + id: ID! @external + a: String! @external + b: String @external + w: W @requires(fields: "a b") + } + + type W @key(fields: "id") { + id: ID + y: Y + w1: Int + w2: Int + w3: Int + w4: Int + w5: Int + } + + type Y { + y1: Int + y2: Int + y3: Int + } + "#, + ); + assert_plan!( + &planner, + r#" + { + start { + u { + w { + id + w1 + w2 + w3 + w4 + w5 + y { + y1 + y2 + y3 + } + } + } + } + } + "#, + @r###" + QueryPlan { + Sequence { + Fetch(service: "S") { + { + start { + __typename + id + } + } + }, + Parallel { + Sequence { + Flatten(path: "start") { + Fetch(service: "B") { + { + ... on T { + __typename + id + } + } => + { + ... on T { + u { + __typename + id + } + } + } + }, + }, + Flatten(path: "start.u") { + Fetch(service: "A") { + { + ... on U { + __typename + id + } + } => + { + ... on U { + b + a + } + } + }, + }, + }, + Flatten(path: "start") { + Fetch(service: "A") { + { + ... on T { + __typename + id + } + } => + { + ... on T { + u { + __typename + id + b + a + } + } + } + }, + }, + }, + Flatten(path: "start.u") { + Fetch(service: "C") { + { + ... on U { + __typename + a + b + id + } + } => + { + ... on U { + w { + y { + y1 + y2 + y3 + } + id + w1 + w2 + w3 + w4 + w5 + } + } + } + }, + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs index d33ef66191..b218969a35 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs @@ -1647,6 +1647,29 @@ fn it_handles_multiple_requires_with_multiple_fetches() { }, }, Parallel { + Flatten(path: "t") { + Fetch(service: "s2") { + { + ... on T { + __typename + x { + isX + } + v { + y { + isY + } + } + id + } + } => + { + ... 
on T { + foo + } + } + }, + }, Sequence { Flatten(path: "t") { Fetch(service: "s2") { @@ -1709,29 +1732,6 @@ fn it_handles_multiple_requires_with_multiple_fetches() { }, }, }, - Flatten(path: "t") { - Fetch(service: "s2") { - { - ... on T { - __typename - x { - isX - } - v { - y { - isY - } - } - id - } - } => - { - ... on T { - foo - } - } - }, - }, }, }, } diff --git a/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph.graphql b/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph.graphql new file mode 100644 index 0000000000..aea8d395a8 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph.graphql @@ -0,0 +1,94 @@ +# Composed from subgraphs with hash: 58cfa42df5c5f20fb0fbe43d4a506b3654439de1 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) 
repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + A @join__graph(name: "A", url: "none") + B @join__graph(name: "B", url: "none") + C @join__graph(name: "C", url: "none") + S @join__graph(name: "S", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: A) + @join__type(graph: B) + @join__type(graph: C) + @join__type(graph: S) +{ + start: T! @join__field(graph: S) +} + +type T + @join__type(graph: A, key: "id") + @join__type(graph: B, key: "id") + @join__type(graph: S, key: "id") +{ + id: String! @join__field(graph: A) @join__field(graph: B, external: true) @join__field(graph: S) + u: U! @join__field(graph: A) @join__field(graph: B) +} + +type U + @join__type(graph: A, key: "id") + @join__type(graph: B, key: "id") + @join__type(graph: C, key: "id", extension: true) +{ + id: ID! + a: String! 
@join__field(graph: A) @join__field(graph: B) @join__field(graph: C, external: true) + b: String @join__field(graph: A) @join__field(graph: C, external: true) + w: W @join__field(graph: C, requires: "a b") +} + +type W + @join__type(graph: B, key: "id", extension: true) + @join__type(graph: C, key: "id") +{ + id: ID + y: Y @join__field(graph: C) + w1: Int @join__field(graph: C) + w2: Int @join__field(graph: C) + w3: Int @join__field(graph: C) + w4: Int @join__field(graph: C) + w5: Int @join__field(graph: C) +} + +type Y + @join__type(graph: C) +{ + y1: Int + y2: Int + y3: Int +} From b337febd60f9de484e651a8ef73b70eb9e4ce44c Mon Sep 17 00:00:00 2001 From: Dariusz Kuc <9501705+dariuszkuc@users.noreply.github.com> Date: Mon, 12 Aug 2024 13:38:39 -0500 Subject: [PATCH 047/108] chore(federation): fix merged abstract types test (#5796) Due to the differences between JS and RS graph structures (JS is always topologically sorted) we occassionally end up with different parallel fetch node order.... --- .../merged_abstract_types_handling.rs | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/merged_abstract_types_handling.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/merged_abstract_types_handling.rs index 31b5503f42..82eade721c 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/merged_abstract_types_handling.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/merged_abstract_types_handling.rs @@ -632,8 +632,6 @@ fn handles_spread_unions_correctly() { } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (reverse order of parallel fetches) fn handles_case_of_key_chains_in_parallel_requires() { let planner = planner!( Subgraph1: r#" @@ -706,6 +704,22 @@ fn handles_case_of_key_chains_in_parallel_requires() { } }, Parallel { + Flatten(path: "t") { + Fetch(service: "Subgraph3") { + { + ... 
on T2 { + __typename + id + y + } + } => + { + ... on T2 { + z + } + } + }, + }, Sequence { Flatten(path: "t") { Fetch(service: "Subgraph2") { @@ -738,22 +752,6 @@ fn handles_case_of_key_chains_in_parallel_requires() { }, }, }, - Flatten(path: "t") { - Fetch(service: "Subgraph3") { - { - ... on T2 { - __typename - id - y - } - } => - { - ... on T2 { - z - } - } - }, - }, }, }, } From 1d5b86af6ffa379afc13db1114622b42ac6175eb Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 13 Aug 2024 09:14:00 +0200 Subject: [PATCH 048/108] Entity cache: reactivate integration tests (#5779) This fixes the cache integration tests that were flaky. This is due in part to all the tests using the same subgraph names and types, on the same redis instance. This applies the following fixes: * use text based logging instead of JSON for more readability * unique query names per test * unique subgraph names per test * remove cached data at the end of the test (not needed for the CI? but it helps for local tests) * raise the SCAN command's COUNT argument (max number of keys returned by one invocation of scan) from 10 to 100 --- .../src/plugins/cache/invalidation.rs | 9 ++-- apollo-router/tests/common.rs | 2 - .../configuration.yaml | 10 ++++- .../{skipped.json => plan.json} | 44 +++++++++++-------- .../supergraph.graphql | 8 ++-- .../README.md | 0 .../configuration.yaml | 8 +++- .../plan.json} | 24 +++++----- .../supergraph.graphql | 2 +- .../configuration.yaml | 8 +++- .../{skipped.json => plan.json} | 34 ++++++++++---- .../supergraph.graphql | 2 +- .../query-planning-redis/configuration.yaml | 6 +++ apollo-router/tests/samples_tests.rs | 4 +- 14 files changed, 103 insertions(+), 58 deletions(-) rename apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/{skipped.json => plan.json} (75%) rename apollo-router/tests/samples/enterprise/entity-cache/{invalidation-subgraph => invalidation-subgraph-name}/README.md (100%) rename 
apollo-router/tests/samples/enterprise/entity-cache/{invalidation-subgraph => invalidation-subgraph-name}/configuration.yaml (78%) rename apollo-router/tests/samples/enterprise/entity-cache/{invalidation-subgraph/skipped.json => invalidation-subgraph-name/plan.json} (77%) rename apollo-router/tests/samples/enterprise/entity-cache/{invalidation-subgraph => invalidation-subgraph-name}/supergraph.graphql (96%) rename apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/{skipped.json => plan.json} (74%) diff --git a/apollo-router/src/plugins/cache/invalidation.rs b/apollo-router/src/plugins/cache/invalidation.rs index a2c2bc80d6..77736f6598 100644 --- a/apollo-router/src/plugins/cache/invalidation.rs +++ b/apollo-router/src/plugins/cache/invalidation.rs @@ -161,7 +161,7 @@ async fn handle_request( ); // FIXME: configurable batch size - let mut stream = storage.scan(key_prefix.clone(), Some(10)); + let mut stream = storage.scan(key_prefix.clone(), Some(100)); let mut count = 0u64; let mut error = None; @@ -184,7 +184,6 @@ async fn handle_request( .map(|k| RedisKey(k.to_string())) .collect::>(); if !keys.is_empty() { - tracing::debug!("deleting keys: {keys:?}"); count += keys.len() as u64; storage.delete(keys).await; @@ -270,10 +269,10 @@ impl InvalidationRequest { fn key_prefix(&self) -> String { match self { InvalidationRequest::Subgraph { subgraph } => { - format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}*",) + format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:*",) } InvalidationRequest::Type { subgraph, r#type } => { - format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}*",) + format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}:*",) } InvalidationRequest::Entity { subgraph, @@ -281,7 +280,7 @@ impl InvalidationRequest { key, } => { let entity_key = hash_entity_key(key); - format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}:entity:{entity_key}*") + 
format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}:entity:{entity_key}:*") } } } diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index 4265605971..35a1115495 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -551,7 +551,6 @@ impl IntegrationTest { async move { let span = info_span!("client_request"); let span_id = span.context().span().span_context().trace_id(); - dbg!(&span_id); async move { let client = reqwest::Client::new(); @@ -574,7 +573,6 @@ impl IntegrationTest { let mut request = builder.json(&query).build().unwrap(); telemetry.inject_context(&mut request); - dbg!(&request.headers()); request.headers_mut().remove(ACCEPT); match client.execute(request).await { Ok(response) => (span_id, response), diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml index b297fee443..e283bbdace 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml @@ -12,6 +12,12 @@ preview_entity_cache: all: enabled: true subgraphs: - reviews: + invalidation-entity-key-reviews: ttl: 120s - enabled: true \ No newline at end of file + enabled: true + +telemetry: + exporters: + logging: + stdout: + format: text \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json similarity index 75% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json index b505259570..c08682fff8 100644 --- 
a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json @@ -7,11 +7,14 @@ "schema_path": "./supergraph.graphql", "configuration_path": "./configuration.yaml", "subgraphs": { - "products": { + "invalidation-entity-key-products": { "requests": [ { "request": { - "body": {"query":"{topProducts{__typename upc}}"} + "body": { + "query":"query InvalidationEntityKey__invalidation_entity_key_products__0{topProducts{__typename upc}}", + "operationName": "InvalidationEntityKey__invalidation_entity_key_products__0" + } }, "response": { "headers": { @@ -23,12 +26,13 @@ } ] }, - "reviews": { + "invalidation-entity-key-reviews": { "requests": [ { "request": { "body": { - "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "operationName": "InvalidationEntityKey__invalidation_entity_key_reviews__1", "variables":{"representations":[{"upc":"0","__typename":"Product"},{"upc":"1","__typename":"Product"}]} } }, @@ -59,7 +63,7 @@ { "type": "Request", "request": { - "query": "{ topProducts { reviews { body } } }" + "query": "query InvalidationEntityKey { topProducts { reviews { body } } }" }, "expected_response": { "data":{ @@ -81,11 +85,13 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "reviews": { + "invalidation-entity-key-reviews": { "requests": [ { "request": { - "body": {"query":"mutation{invalidateProductReview}"} + "body": { + "query":"mutation InvalidationEntityKey__invalidation_entity_key_reviews__0{invalidateProductReview}" + } }, "response": { "headers": { @@ -96,7 +102,7 @@ "extensions": { "invalidation": [{ "kind": "entity", - "subgraph": "reviews", + "subgraph": "invalidation-entity-key-reviews", 
"type": "Product", "key": { "upc": "1" @@ -109,7 +115,7 @@ { "request": { "body": { - "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", "variables":{"representations":[{"upc":"1","__typename":"Product"}]} } }, @@ -129,7 +135,7 @@ { "type": "Request", "request": { - "query": "{ topProducts { reviews { body } } }" + "query": "query InvalidationEntityKey { topProducts { reviews { body } } }" }, "expected_response": { "data":{ @@ -151,7 +157,7 @@ { "type": "Request", "request": { - "query": "mutation { invalidateProductReview }" + "query": "mutation InvalidationEntityKey { invalidateProductReview }" }, "expected_response": { "data":{ @@ -162,7 +168,7 @@ { "type": "Request", "request": { - "query": "{ topProducts { reviews { body } } }" + "query": "query InvalidationEntityKey { topProducts { reviews { body } } }" }, "expected_response":{ "data":{ @@ -170,12 +176,12 @@ }, "errors":[ { - "message":"HTTP fetch failed from 'reviews': 500: Internal Server Error", - "extensions":{"code":"SUBREQUEST_HTTP_ERROR","service":"reviews","reason":"500: Internal Server Error","http":{"status":500}} + "message":"HTTP fetch failed from 'invalidation-entity-key-reviews': 500: Internal Server Error", + "extensions":{"code":"SUBREQUEST_HTTP_ERROR","service":"invalidation-entity-key-reviews","reason":"500: Internal Server Error","http":{"status":500}} }, { - "message":"service 'reviews' response was malformed: {}", - "extensions":{"service":"reviews","reason":"{}","code":"SUBREQUEST_MALFORMED_RESPONSE"} + "message":"service 'invalidation-entity-key-reviews' response was malformed: {}", + "extensions":{"service":"invalidation-entity-key-reviews","reason":"{}","code":"SUBREQUEST_MALFORMED_RESPONSE"} } ] } @@ -183,12 +189,12 @@ { "type": 
"ReloadSubgraphs", "subgraphs": { - "reviews": { + "invalidation-entity-key-reviews": { "requests": [ { "request": { "body": { - "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", "variables":{"representations":[{"upc":"1","__typename":"Product"}]} } }, @@ -213,7 +219,7 @@ { "type": "Request", "request": { - "query": "{ topProducts { reviews { body } } }" + "query": "query InvalidationEntityKey { topProducts { reviews { body } } }" }, "expected_response": { "data":{ diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql index 8f4b1aa05b..630e59c38b 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql @@ -37,10 +37,10 @@ enum core__Purpose { scalar join__FieldSet enum join__Graph { - ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") - INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") - PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev") - REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") + ACCOUNTS @join__graph(name: "invalidation-entity-key-accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY @join__graph(name: "invalidation-entity-key-inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS @join__graph(name: "invalidation-entity-key-products", url: "https://products.demo.starstuff.dev") + REVIEWS @join__graph(name: "invalidation-entity-key-reviews", url: 
"https://reviews.demo.starstuff.dev") } type Mutation { updateMyAccount: User @join__field(graph: ACCOUNTS) diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/README.md b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/README.md similarity index 100% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/README.md rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/README.md diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/configuration.yaml similarity index 78% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/configuration.yaml index a54c33f25d..85e106df9f 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/configuration.yaml @@ -17,4 +17,10 @@ preview_entity_cache: subgraphs: reviews: ttl: 120s - enabled: true \ No newline at end of file + enabled: true + +telemetry: + exporters: + logging: + stdout: + format: text \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json similarity index 77% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/skipped.json rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json index cadc7ac809..9bbbd1d90c 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/skipped.json +++ 
b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json @@ -7,11 +7,11 @@ "schema_path": "./supergraph.graphql", "configuration_path": "./configuration.yaml", "subgraphs": { - "accounts": { + "invalidation-subgraph-name-accounts": { "requests": [ { "request": { - "body": {"query":"{me{name}}"} + "body": {"query":"query InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{me{name}}"} }, "response": { "headers": { @@ -28,7 +28,7 @@ { "type": "Request", "request": { - "query": "{ me { name } }" + "query": "query InvalidationSubgraphName { me { name } }" }, "expected_response": { "data":{ @@ -41,11 +41,11 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "accounts": { + "invalidation-subgraph-name-accounts": { "requests": [ { "request": { - "body": {"query":"mutation{updateMyAccount{name}}"} + "body": {"query":"mutation InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{updateMyAccount{name}}"} }, "response": { "headers": { @@ -56,7 +56,7 @@ "extensions": { "invalidation": [{ "kind": "subgraph", - "subgraph": "accounts" + "subgraph": "invalidation-subgraph-name-accounts" }] } } @@ -69,7 +69,7 @@ { "type": "Request", "request": { - "query": "{ me { name } }" + "query": "query InvalidationSubgraphName { me { name } }" }, "expected_response": { "data":{ @@ -82,7 +82,7 @@ { "type": "Request", "request": { - "query": "mutation { updateMyAccount { name } }" + "query": "mutation InvalidationSubgraphName { updateMyAccount { name } }" }, "expected_response": { "data":{ @@ -95,15 +95,15 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "accounts": { + "invalidation-subgraph-name-accounts": { "requests": [ { "request": { - "body": {"query":"{me{name}}"} + "body": {"query":"query InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{me{name}}"} }, "response": { "headers": { - "Cache-Control": "public, max-age=10", + "Cache-Control": "no-store, max-age=0", "Content-Type": "application/json" }, "body": {"data": { 
"me": { "name": "invalidation-subgraph2" } } } @@ -116,7 +116,7 @@ { "type": "Request", "request": { - "query": "{ me { name } }" + "query": "query InvalidationSubgraphName{ me { name } }" }, "expected_response": { "data":{ diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/supergraph.graphql similarity index 96% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/supergraph.graphql rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/supergraph.graphql index 1196414b6f..c8184433b1 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/supergraph.graphql +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/supergraph.graphql @@ -37,7 +37,7 @@ enum core__Purpose { scalar join__FieldSet enum join__Graph { - ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + ACCOUNTS @join__graph(name: "invalidation-subgraph-name-accounts", url: "https://accounts.demo.starstuff.dev") INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev") REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml index 55728b841b..96577bbb28 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml @@ -21,4 +21,10 @@ preview_entity_cache: subgraphs: reviews: ttl: 120s - enabled: true \ No newline at end of 
file + enabled: true + +telemetry: + exporters: + logging: + stdout: + format: text \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json similarity index 74% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json index 89e90f1be9..72e39a7b80 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json @@ -7,11 +7,11 @@ "schema_path": "./supergraph.graphql", "configuration_path": "./configuration.yaml", "subgraphs": { - "accounts": { + "invalidation-subgraph-type-accounts": { "requests": [ { "request": { - "body": {"query":"query InvalidationSubgraphType__accounts__0{me{name id}}","operationName":"InvalidationSubgraphType__accounts__0"} + "body": {"query":"query InvalidationSubgraphType__invalidation_subgraph_type_accounts__0{me{name id}}","operationName":"InvalidationSubgraphType__invalidation_subgraph_type_accounts__0"} }, "response": { "headers": { @@ -42,7 +42,7 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "accounts": { + "invalidation-subgraph-type-accounts": { "requests": [] } } @@ -66,21 +66,24 @@ "url": "http://127.0.0.1:12345/invalidation-sample-subgraph-type", "request": { "method": "POST", - "body": { + "headers": { + "Authorization": "1234" + }, + "body": [{ "kind": "type", - "subgraph": "accounts", + "subgraph": "invalidation-subgraph-type-accounts", "type": "Query" - } + }] } }, { "type": "ReloadSubgraphs", "subgraphs": { - "accounts": { + "invalidation-subgraph-type-accounts": { "requests": [ { "request": { - "body": {"query":"query InvalidationSubgraphType__accounts__0{me{name id}}", 
"operationName":"InvalidationSubgraphType__accounts__0"} + "body": {"query":"query InvalidationSubgraphType__invalidation_subgraph_type_accounts__0{me{name id}}", "operationName":"InvalidationSubgraphType__invalidation_subgraph_type_accounts__0"} }, "response": { "headers": { @@ -108,6 +111,21 @@ } } }, + { + "type": "EndpointRequest", + "url": "http://127.0.0.1:12345/invalidation-sample-subgraph-type", + "request": { + "method": "POST", + "headers": { + "Authorization": "1234" + }, + "body": [{ + "kind": "type", + "subgraph": "invalidation-subgraph-type-accounts", + "type": "Query" + }] + } + }, { "type": "Stop" } diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/supergraph.graphql index 1196414b6f..a9554a070d 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/supergraph.graphql +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/supergraph.graphql @@ -37,7 +37,7 @@ enum core__Purpose { scalar join__FieldSet enum join__Graph { - ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + ACCOUNTS @join__graph(name: "invalidation-subgraph-type-accounts", url: "https://accounts.demo.starstuff.dev") INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev") REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") diff --git a/apollo-router/tests/samples/enterprise/query-planning-redis/configuration.yaml b/apollo-router/tests/samples/enterprise/query-planning-redis/configuration.yaml index 97518c7956..b00c6e7d58 100644 --- a/apollo-router/tests/samples/enterprise/query-planning-redis/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/query-planning-redis/configuration.yaml @@ -8,3 
+8,9 @@ supergraph: cache: redis: urls: ["redis://localhost:6379",] + +telemetry: + exporters: + logging: + stdout: + format: text \ No newline at end of file diff --git a/apollo-router/tests/samples_tests.rs b/apollo-router/tests/samples_tests.rs index da324cfbd7..fe7d87ed71 100644 --- a/apollo-router/tests/samples_tests.rs +++ b/apollo-router/tests/samples_tests.rs @@ -472,13 +472,13 @@ impl TestExecution { writeln!(out, "assertion `left == right` failed").unwrap(); writeln!( out, - " left: {}", + " expected: {}", serde_json::to_string(&expected_response).unwrap() ) .unwrap(); writeln!( out, - "right: {}", + "received: {}", serde_json::to_string(&graphql_response).unwrap() ) .unwrap(); From 0068d68401ef4a10f8f77d359da2db6e5867c45a Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 13 Aug 2024 09:33:28 +0200 Subject: [PATCH 049/108] Entity cache: return cached entities with errors (#5776) If we are requesting entities from a subgraph, where some of them are present in cache, and the subgraph is unavailable (ex: network issue), we want to return a response with the entities we got from the cache, other entities nullified, and an error pointing at the paths of unavailable entities. 
Co-authored-by: Bryn Cooke --- .../feat_geal_return_response_with_errors.md | 5 + apollo-router/src/plugins/cache/entity.rs | 81 +++- ...ins__cache__tests__missing_entities-2.snap | 34 ++ ...ugins__cache__tests__missing_entities.snap | 20 + ...ter__plugins__cache__tests__no_data-2.snap | 39 ++ ...outer__plugins__cache__tests__no_data.snap | 20 + apollo-router/src/plugins/cache/tests.rs | 366 +++++++++++++++++- docs/source/configuration/entity-caching.mdx | 4 + 8 files changed, 565 insertions(+), 4 deletions(-) create mode 100644 .changesets/feat_geal_return_response_with_errors.md create mode 100644 apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities-2.snap create mode 100644 apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities.snap create mode 100644 apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-2.snap create mode 100644 apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap diff --git a/.changesets/feat_geal_return_response_with_errors.md b/.changesets/feat_geal_return_response_with_errors.md new file mode 100644 index 0000000000..5d83ad3fa1 --- /dev/null +++ b/.changesets/feat_geal_return_response_with_errors.md @@ -0,0 +1,5 @@ +### Entity cache: return cached entities with errors ([PR #5776](https://github.com/apollographql/router/pull/5776)) + +If we are requesting entities from a subgraph, where some of them are present in cache, and the subgraph is unavailable (ex: network issue), we want to return a response with the entities we got from the cache, other entities nullified, and an error pointing at the paths of unavailable entities. 
+ +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5776 \ No newline at end of file diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index deacb1db7f..3992dbd670 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -659,8 +659,48 @@ impl InnerCacheService { .await? { ControlFlow::Break(response) => Ok(response), - ControlFlow::Continue((request, cache_result)) => { - let mut response = self.service.call(request).await?; + ControlFlow::Continue((request, mut cache_result)) => { + let context = request.context.clone(); + let mut response = match self.service.call(request).await { + Ok(response) => response, + Err(e) => { + let e = match e.downcast::() { + Ok(inner) => match *inner { + FetchError::SubrequestHttpError { .. } => *inner, + _ => FetchError::SubrequestHttpError { + status_code: None, + service: self.name.to_string(), + reason: inner.to_string(), + }, + }, + Err(e) => FetchError::SubrequestHttpError { + status_code: None, + service: self.name.to_string(), + reason: e.to_string(), + }, + }; + + let graphql_error = e.to_graphql_error(None); + + let (new_entities, new_errors) = assemble_response_from_errors( + &[graphql_error], + &mut cache_result.0, + ); + + let mut data = Object::default(); + data.insert(ENTITIES, new_entities.into()); + + let mut response = subgraph::Response::builder() + .context(context) + .data(Value::Object(data)) + .errors(new_errors) + .extensions(Object::new()) + .build(); + CacheControl::no_store().to_headers(response.response.headers_mut())?; + + return Ok(response); + } + }; let mut cache_control = if response.response.headers().contains_key(CACHE_CONTROL) { @@ -974,6 +1014,15 @@ async fn cache_store_entities_from_response( .map(|o| o.insert(ENTITIES, new_entities.into())); response.response.body_mut().data = data; response.response.body_mut().errors = new_errors; + } else { + let 
(new_entities, new_errors) = + assemble_response_from_errors(&response.response.body().errors, &mut result_from_cache); + + let mut data = Object::default(); + data.insert(ENTITIES, new_entities.into()); + + response.response.body_mut().data = Some(Value::Object(data)); + response.response.body_mut().errors = new_errors; } Ok(()) @@ -1327,3 +1376,31 @@ async fn insert_entities_in_result( Ok((new_entities, new_errors)) } + +fn assemble_response_from_errors( + graphql_errors: &[Error], + result: &mut Vec, +) -> (Vec, Vec) { + let mut new_entities = Vec::new(); + let mut new_errors = Vec::new(); + + for (new_entity_idx, IntermediateResult { cache_entry, .. }) in result.drain(..).enumerate() { + match cache_entry { + Some(v) => { + new_entities.push(v.data); + } + None => { + new_entities.push(Value::Null); + + for mut error in graphql_errors.iter().cloned() { + error.path = Some(Path(vec![ + PathElement::Key(ENTITIES.to_string(), None), + PathElement::Index(new_entity_idx), + ])); + new_errors.push(error); + } + } + } + } + (new_entities, new_errors) +} diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities-2.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities-2.snap new file mode 100644 index 0000000000..9798af179e --- /dev/null +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities-2.snap @@ -0,0 +1,34 @@ +--- +source: apollo-router/src/plugins/cache/tests.rs +expression: response +--- +{ + "data": { + "currentUser": { + "allOrganizations": [ + { + "id": "1", + "name": "Organization 1" + }, + { + "id": "2", + "name": "Organization 2" + }, + { + "id": "3", + "name": null + } + ] + } + }, + "errors": [ + { + "message": "Organization not found", + "path": [ + "currentUser", + "allOrganizations", + 2 + ] + } + ] +} diff --git 
a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities.snap new file mode 100644 index 0000000000..6ea1f9fedd --- /dev/null +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities.snap @@ -0,0 +1,20 @@ +--- +source: apollo-router/src/plugins/cache/tests.rs +expression: response +--- +{ + "data": { + "currentUser": { + "allOrganizations": [ + { + "id": "1", + "name": "Organization 1" + }, + { + "id": "2", + "name": "Organization 2" + } + ] + } + } +} diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-2.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-2.snap new file mode 100644 index 0000000000..6e58a2d437 --- /dev/null +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-2.snap @@ -0,0 +1,39 @@ +--- +source: apollo-router/src/plugins/cache/tests.rs +expression: response +--- +{ + "data": { + "currentUser": { + "allOrganizations": [ + { + "id": "1", + "name": "Organization 1" + }, + { + "id": "2", + "name": null + }, + { + "id": "3", + "name": "Organization 3" + } + ] + } + }, + "errors": [ + { + "message": "HTTP fetch failed from 'orga': orga not found", + "path": [ + "currentUser", + "allOrganizations", + 1 + ], + "extensions": { + "code": "SUBREQUEST_HTTP_ERROR", + "service": "orga", + "reason": "orga not found" + } + } + ] +} diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap new file mode 100644 index 0000000000..b9832aaeaa --- /dev/null +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap @@ -0,0 +1,20 @@ +--- +source: 
apollo-router/src/plugins/cache/tests.rs +expression: response +--- +{ + "data": { + "currentUser": { + "allOrganizations": [ + { + "id": "1", + "name": "Organization 1" + }, + { + "id": "3", + "name": "Organization 3" + } + ] + } + } +} diff --git a/apollo-router/src/plugins/cache/tests.rs b/apollo-router/src/plugins/cache/tests.rs index f5cb713473..36628acf5e 100644 --- a/apollo-router/src/plugins/cache/tests.rs +++ b/apollo-router/src/plugins/cache/tests.rs @@ -15,7 +15,9 @@ use tower::ServiceExt; use super::entity::EntityCache; use crate::cache::redis::RedisCacheStorage; use crate::plugin::test::MockSubgraph; +use crate::plugin::test::MockSubgraphService; use crate::plugins::cache::entity::Subgraph; +use crate::services::subgraph; use crate::services::supergraph; use crate::Context; use crate::MockedSubgraphs; @@ -55,6 +57,7 @@ const SCHEMA: &str = r#"schema id: ID! name: String activeOrganization: Organization + allOrganizations: [Organization] } type Organization @join__owner(graph: ORGA) @@ -93,6 +96,19 @@ impl Mocks for MockStore { } } } + "MGET" => { + let mut result: Vec = Vec::new(); + + let mut args_it = command.args.iter(); + while let Some(RedisValue::Bytes(key)) = args_it.next() { + if let Some(bytes) = self.map.lock().get(key) { + result.push(RedisValue::Bytes(bytes.clone())); + } else { + result.push(RedisValue::Null); + } + } + return Ok(RedisValue::Array(result)); + } "SET" => { if let (Some(RedisValue::Bytes(key)), Some(RedisValue::Bytes(value))) = (command.args.first(), command.args.get(1)) @@ -169,7 +185,7 @@ impl Mocks for MockStore { } }*/ _ => { - panic!() + panic!("unrecoginzed command: {command:?}") } } Err(RedisError::new(RedisErrorKind::NotFound, "mock not found")) @@ -213,7 +229,31 @@ async fn insert() { let redis_cache = RedisCacheStorage::from_mocks(Arc::new(MockStore::new())) .await .unwrap(); - let entity_cache = EntityCache::with_mocks(redis_cache.clone(), HashMap::new()) + let map = [ + ( + "user".to_string(), + Subgraph { + 
redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ( + "orga".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ] + .into_iter() + .collect(); + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), map) .await .unwrap(); @@ -501,6 +541,328 @@ async fn private() { insta::assert_json_snapshot!(response); } +#[tokio::test] +async fn no_data() { + let query = "query { currentUser { allOrganizations { id name } } }"; + + let subgraphs = MockedSubgraphs([ + ("user", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"{currentUser{allOrganizations{__typename id}}}"}}, + serde_json::json!{{"data": {"currentUser": { "allOrganizations": [ + { + "__typename": "Organization", + "id": "1" + }, + { + "__typename": "Organization", + "id": "3" + } + ] }}}} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("no-store")).build()), + ("orga", MockSubgraph::builder().with_json( + serde_json::json!{{ + "query": "query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{name}}}", + "variables": { + "representations": [ + { + "id": "1", + "__typename": "Organization", + }, + { + "id": "3", + "__typename": "Organization", + } + ] + }}}, + serde_json::json!{{ + "data": { + "_entities": [{ + "name": "Organization 1", + }, + { + "name": "Organization 3" + }] + } + }} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("public, max-age=3600")).build()) + ].into_iter().collect()); + + let redis_cache = RedisCacheStorage::from_mocks(Arc::new(MockStore::new())) + .await + .unwrap(); + let map = [ + ( + "user".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ( + "orga".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: 
None, + ..Default::default() + }, + ), + ] + .into_iter() + .collect(); + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), map) + .await + .unwrap(); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(entity_cache) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .query(query) + .context(Context::new()) + .build() + .unwrap(); + let mut response = service.oneshot(request).await.unwrap(); + + let response = response.next_response().await.unwrap(); + insta::assert_json_snapshot!(response); + + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), HashMap::new()) + .await + .unwrap(); + + let subgraphs = MockedSubgraphs( + [( + "user", + MockSubgraph::builder() + .with_json( + serde_json::json! {{"query":"{currentUser{allOrganizations{__typename id}}}"}}, + serde_json::json! 
{{"data": {"currentUser": { "allOrganizations": [ + { + "__typename": "Organization", + "id": "1" + }, + { + "__typename": "Organization", + "id": "2" + }, + { + "__typename": "Organization", + "id": "3" + } + ] }}}}, + ) + .with_header(CACHE_CONTROL, HeaderValue::from_static("no-store")) + .build(), + )] + .into_iter() + .collect(), + ); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(entity_cache) + .subgraph_hook(|name, service| { + if name == "orga" { + let mut subgraph = MockSubgraphService::new(); + subgraph + .expect_call() + .times(1) + .returning(move |_req: subgraph::Request| Err("orga not found".into())); + subgraph.boxed() + } else { + service + } + }) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .query(query) + .context(Context::new()) + .build() + .unwrap(); + let mut response = service.oneshot(request).await.unwrap(); + let response = response.next_response().await.unwrap(); + + insta::assert_json_snapshot!(response); +} + +#[tokio::test] +async fn missing_entities() { + let query = "query { currentUser { allOrganizations { id name } } }"; + + let subgraphs = MockedSubgraphs([ + ("user", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"{currentUser{allOrganizations{__typename id}}}"}}, + serde_json::json!{{"data": {"currentUser": { "allOrganizations": [ + { + "__typename": "Organization", + "id": "1" + }, + { + "__typename": "Organization", + "id": "2" + } + ] }}}} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("no-store")).build()), + ("orga", MockSubgraph::builder().with_json( + serde_json::json!{{ + "query": "query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{name}}}", + "variables": { + "representations": [ + { + "id": "1", + "__typename": "Organization", + }, + { + "id": 
"2", + "__typename": "Organization", + } + ] + }}}, + serde_json::json!{{ + "data": { + "_entities": [ + { + "name": "Organization 1", + }, + { + "name": "Organization 2" + } + ] + } + }} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("public, max-age=3600")).build()) + ].into_iter().collect()); + + let redis_cache = RedisCacheStorage::from_mocks(Arc::new(MockStore::new())) + .await + .unwrap(); + let map = [ + ( + "user".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ( + "orga".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ] + .into_iter() + .collect(); + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), map) + .await + .unwrap(); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(entity_cache) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .query(query) + .context(Context::new()) + .build() + .unwrap(); + let mut response = service.oneshot(request).await.unwrap(); + let response = response.next_response().await.unwrap(); + insta::assert_json_snapshot!(response); + + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), HashMap::new()) + .await + .unwrap(); + + let subgraphs = MockedSubgraphs([ + ("user", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"{currentUser{allOrganizations{__typename id}}}"}}, + serde_json::json!{{"data": {"currentUser": { "allOrganizations": [ + { + "__typename": "Organization", + "id": "1" + }, + { + "__typename": "Organization", + "id": "2" + }, + { + "__typename": "Organization", + "id": "3" + } + ] }}}} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("no-store")).build()), + ("orga", 
MockSubgraph::builder().with_json( + serde_json::json!{{ + "query": "query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{name}}}", + "variables": { + "representations": [ + { + "id": "3", + "__typename": "Organization", + } + ] + }}}, + serde_json::json!{{ + "data": null, + "errors": [{ + "message": "Organization not found", + }] + }} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("public, max-age=3600")).build()) + ].into_iter().collect()); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(entity_cache) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .query(query) + .context(Context::new()) + .build() + .unwrap(); + let mut response = service.oneshot(request).await.unwrap(); + let response = response.next_response().await.unwrap(); + insta::assert_json_snapshot!(response); +} + /*FIXME: reactivate test if we manage to make fred return the response to SCAN in mocks #[tokio::test(flavor = "multi_thread")] async fn invalidate() { diff --git a/docs/source/configuration/entity-caching.mdx b/docs/source/configuration/entity-caching.mdx index 093ca7b947..6d53a2ba7b 100644 --- a/docs/source/configuration/entity-caching.mdx +++ b/docs/source/configuration/entity-caching.mdx @@ -133,6 +133,10 @@ The Router currently cannot know which types or fields should be cached, so it r To prevent transient errors from affecting the cache for a long duration, subgraph responses with errors are not cached. +### Cached entities with unavailable subgraph + +If some entities were obtained from the cache, but the subgraphs that provided them are unavailable, the router will return a response with the cached entities, and the other entities nullified (schema permitting), along with an error message for the nullified entities. 
+ ### Authorization and entity caching When used alongside the router's [authorization directives](./authorization), cache entries are separated by authorization context. If a query contains fields that need a specific scope, the requests providing that scope have different cache entries from those not providing the scope. This means that data requiring authorization can still be safely cached and even shared across users, without needing invalidation when a user's roles change because their requests are automatically directed to a different part of the cache. From 81ff40d74ddee9520877fc0bfc111e16402872a9 Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Tue, 13 Aug 2024 08:12:02 -0700 Subject: [PATCH 050/108] refactor(federation): added more comments in fetch dependency graph code (#5806) - related to PR #5797 --- .../src/query_plan/fetch_dependency_graph.rs | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index 93d418606d..d266f7abcb 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -1072,6 +1072,9 @@ impl FetchDependencyGraph { if to_remove.is_empty() { return; // unchanged } + // Note: We remove empty nodes without relocating their children. The invariant is that + // the children of empty nodes (if any) must be accessible from the root via another path. + // Otherwise, they would've become inaccessible orphan nodes. self.retain_nodes(|node_index| !to_remove.contains(node_index)); } @@ -1450,6 +1453,13 @@ impl FetchDependencyGraph { // generate a simple string key from each node subgraph name and mergeAt. We do "sanitize" // subgraph name, but have no worries for `mergeAt` since it contains either number of // field names, and the later is restricted by graphQL so as to not be an issue. 
+ // PORT_NOTE: The JS version iterates over the nodes in their index order, which is also + // the insertion order. The Rust version uses a topological sort to ensure that we never + // merge an ancestor node into a descendant node. JS version's insertion order is almost + // topologically sorted, thanks to the way the graph is constructed from the root. However, + // it's not exactly topologically sorted. So, it's unclear whether that is 100% safe. + // Note: MultiMap preserves insertion order for values of the same key. Thus, the values + // of the same key in `by_subgraphs` will be topologically sorted as well. let mut by_subgraphs = MultiMap::new(); let sorted_nodes = petgraph::algo::toposort(&self.graph, None) .map_err(|_| FederationError::internal("Failed to sort nodes due to cycle(s)"))?; @@ -1470,7 +1480,7 @@ impl FetchDependencyGraph { } // Create disjoint sets of the nodes. - // buckets: an array where each entry is a "bucket" of groups that can all be merge together. + // buckets: an array where each entry is a "bucket" of nodes that can all be merge together. let mut buckets: Vec<(NodeIndex, Vec)> = Vec::new(); let has_equal_inputs = |a: NodeIndex, b: NodeIndex| { let a_node = self.node_weight(a)?; @@ -1503,9 +1513,12 @@ impl FetchDependencyGraph { continue; }; - // We pick the head for the group and merge all others into it. Note that which - // group we choose shouldn't matter since the merging preserves all the + // We pick the head for the nodes and merge all others into it. Note that which + // node we choose shouldn't matter since the merging preserves all the // dependencies of each group (both parents and children). + // However, we must not merge an ancestor node into a descendant node. Thus, + // we choose the head as the first node in the bucket that is also the earliest + // in the topo-sorted order. 
for node in rest { self.merge_in_with_all_dependencies(*head, *node)?; } @@ -2017,6 +2030,7 @@ impl FetchDependencyGraph { Ok(()) } + /// Assumption: merged_id is not an ancestor of node_id in the graph. fn merge_in_internal( &mut self, node_id: NodeIndex, @@ -2071,6 +2085,7 @@ impl FetchDependencyGraph { // - node_id's defer_ref == merged_id's defer_ref // - node_id's subgraph_name == merged_id's subgraph_name // - node_id's merge_at == merged_id's merge_at + // - merged_id is not an ancestor of node_id in the graph. fn merge_in_with_all_dependencies( &mut self, node_id: NodeIndex, From 20c6134eaa0493f733e373ba8337549788740a16 Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Tue, 13 Aug 2024 19:57:09 -0700 Subject: [PATCH 051/108] test(federation): added `FetchDependencyGraph::to_dot` method (#5805) - Generates a string representing the graph in GraphViz dot format. - Mainly for debugging purposes. --- .../src/query_plan/fetch_dependency_graph.rs | 115 ++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index d266f7abcb..34a86e6ae9 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -2285,6 +2285,46 @@ impl std::fmt::Display for FetchDependencyGraph { } } +// Necessary for `petgraph::dot::Dot::with_attr_getters` calls to compile, but not executed. +impl std::fmt::Display for FetchDependencyGraphNode { + fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Err(std::fmt::Error) + } +} + +// Necessary for `petgraph::dot::Dot::with_attr_getters` calls to compile, but not executed. 
+impl std::fmt::Display for FetchDependencyGraphEdge { + fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Err(std::fmt::Error) + } +} + +impl FetchDependencyGraph { + // GraphViz output for FetchDependencyGraph + pub fn to_dot(&self) -> String { + fn label_node(node_id: NodeIndex, node: &FetchDependencyGraphNode) -> String { + let label_str = node.multiline_display(node_id).to_string(); + format!("label=\"{}\"", label_str.replace('"', "\\\"")) + } + + fn label_edge(edge_id: EdgeIndex) -> String { + format!("label=\"{}\"", edge_id.index()) + } + + let config = [ + petgraph::dot::Config::NodeNoLabel, + petgraph::dot::Config::EdgeNoLabel, + ]; + petgraph::dot::Dot::with_attr_getters( + &self.graph, + &config, + &(|_, er| label_edge(er.id())), + &(|_, (node_id, node)| label_node(node_id, node)), + ) + .to_string() + } +} + impl FetchDependencyGraphNode { pub(crate) fn selection_set_mut(&mut self) -> &mut FetchSelectionSet { self.cached_cost = None; @@ -2570,6 +2610,81 @@ impl FetchDependencyGraphNode { FetchDependencyNodeDisplay { node: self, index } } + // A variation of `fn display` with multiline output, which is more suitable for + // GraphViz output. 
+ pub fn multiline_display(&self, index: NodeIndex) -> impl std::fmt::Display + '_ { + use std::fmt; + use std::fmt::Display; + use std::fmt::Formatter; + + struct DisplayList<'a, T: Display>(&'a [T]); + impl Display for DisplayList<'_, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut iter = self.0.iter(); + if let Some(x) = iter.next() { + write!(f, "{x}")?; + } + for x in iter { + write!(f, "::{x}")?; + } + Ok(()) + } + } + + struct FetchDependencyNodeDisplay<'a> { + node: &'a FetchDependencyGraphNode, + index: NodeIndex, + } + + impl Display for FetchDependencyNodeDisplay<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "[{}]", self.index.index())?; + if self.node.defer_ref.is_some() { + write!(f, "(deferred)")?; + } + if let Some(&id) = self.node.id.get() { + write!(f, "{{id: {id}}}")?; + } + + write!(f, " {}", self.node.subgraph_name)?; + + match (self.node.merge_at.as_deref(), self.node.inputs.as_deref()) { + (Some(merge_at), Some(inputs)) => { + write!( + f, + // @(path::to::*::field)[{input1,input2} => { id }] + "\n@({})\n{}\n=>\n{}\n", + DisplayList(merge_at), + inputs, + self.node.selection_set.selection_set + )?; + } + (Some(merge_at), None) => { + write!( + f, + // @(path::to::*::field)[{} => { id }] + "\n@({})\n{{}}\n=>\n{}\n", + DisplayList(merge_at), + self.node.selection_set.selection_set + )?; + } + (None, _) => { + // [(type){ id }] + write!( + f, + "\n({})\n{}", + self.node.parent_type, self.node.selection_set.selection_set + )?; + } + } + + Ok(()) + } + } + + FetchDependencyNodeDisplay { node: self, index } + } + // PORT_NOTE: In JS version, this value is memoized on the node struct. fn subgraph_and_merge_at_key(&self) -> Option { // PORT_NOTE: In JS version, this hash value is defined as below. 
From 71962ef106ae3d15ed64b46d684f642a5516c69e Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Wed, 14 Aug 2024 11:04:22 +0200 Subject: [PATCH 052/108] execute supergraph query selector also on events (#5764) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- ...fix_bnjjj_fix_supergraph_query_selector.md | 20 +++++++++++ .../plugins/telemetry/config_new/selectors.rs | 33 +++++++++++++++++-- 2 files changed, 51 insertions(+), 2 deletions(-) create mode 100644 .changesets/fix_bnjjj_fix_supergraph_query_selector.md diff --git a/.changesets/fix_bnjjj_fix_supergraph_query_selector.md b/.changesets/fix_bnjjj_fix_supergraph_query_selector.md new file mode 100644 index 0000000000..0914846045 --- /dev/null +++ b/.changesets/fix_bnjjj_fix_supergraph_query_selector.md @@ -0,0 +1,20 @@ +### Execute supergraph query selector also on events ([PR #5764](https://github.com/apollographql/router/pull/5764)) + +The `query: root_fields` selector works on `response` stage for events right now but it should also work on `event_response`. This configuration is now working: + +```yaml +telemetry: + instrumentation: + events: + supergraph: + OPERATION_LIMIT_INFO: + message: operation limit info + on: event_response + level: info + attributes: + graphql.operation.name: true + query.root_fields: + query: root_fields +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5764 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs index 1216afe0f5..875f88efb7 100644 --- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs @@ -833,7 +833,11 @@ impl Selector for SupergraphSelector { .flatten() .map(opentelemetry::Value::from), - SupergraphSelector::Query { default, .. } => request + SupergraphSelector::Query { + default, + query: Query::String, + .. 
+ } => request .supergraph_request .body() .query @@ -990,6 +994,25 @@ impl Selector for SupergraphSelector { ctx: &Context, ) -> Option { match self { + SupergraphSelector::Query { query, .. } => { + let limits_opt = ctx + .extensions() + .with_lock(|lock| lock.get::>().cloned()); + match query { + Query::Aliases => { + limits_opt.map(|limits| opentelemetry::Value::I64(limits.aliases as i64)) + } + Query::Depth => { + limits_opt.map(|limits| opentelemetry::Value::I64(limits.depth as i64)) + } + Query::Height => { + limits_opt.map(|limits| opentelemetry::Value::I64(limits.height as i64)) + } + Query::RootFields => limits_opt + .map(|limits| opentelemetry::Value::I64(limits.root_fields as i64)), + Query::String => None, + } + } SupergraphSelector::ResponseData { response_data, default, @@ -3010,13 +3033,19 @@ mod test { selector .on_response( &crate::services::SupergraphResponse::fake_builder() - .context(context) + .context(context.clone()) .build() .unwrap() ) .unwrap(), 4.into() ); + assert_eq!( + selector + .on_response_event(&crate::graphql::Response::builder().build(), &context) + .unwrap(), + 4.into() + ); } #[test] From e6252dd869fc4a76bd40643fd5bb7284804d87b1 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 14 Aug 2024 15:41:49 +0200 Subject: [PATCH 053/108] skip caching tests again (#5817) --- .../invalidation-entity-key/{plan.json => skipped.json} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/{plan.json => skipped.json} (100%) diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json similarity index 100% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json From 
7752a1f9c10fe9c531501c884646dc1c671b6506 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 14 Aug 2024 16:04:38 +0200 Subject: [PATCH 054/108] improve entity cache documentation (#5574) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Coenen Benjamin Co-authored-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Edward Huang Co-authored-by: Bryn Cooke --- .../exp_geal_entity_cache_documentation.md | 27 ++++ docs/source/configuration/entity-caching.mdx | 119 +++++++++++++++++- 2 files changed, 144 insertions(+), 2 deletions(-) create mode 100644 .changesets/exp_geal_entity_cache_documentation.md diff --git a/.changesets/exp_geal_entity_cache_documentation.md b/.changesets/exp_geal_entity_cache_documentation.md new file mode 100644 index 0000000000..546cc6cd40 --- /dev/null +++ b/.changesets/exp_geal_entity_cache_documentation.md @@ -0,0 +1,27 @@ +### Entity cache preview ([PR #5574](https://github.com/apollographql/router/pull/5574)) + +#### Support private information caching + +The router supports a new `private_id` option that enables separate, private cache entries to be allocated per user for authenticated requests. + +When a subgraph returns a `Cache-Control: private` header, the response data shouldn't be cached and shared among users. However, since the router supports request authentication, it can use it to allocate separate cache entries per users. + +To enable this, configure the `private_id` to be the name of a key in the request context that contains the data that's used to differentiate users. This option must be paired with a coprocessor or Rhai script to set the value in context. 
+ +Example configuration: + +```yaml title="router.yaml" +# Enable entity caching globally +preview_entity_cache: + enabled: true + subgraph: + all: + enabled: true + accounts: + private_id: "user_id" +``` + + +To learn more about configuring and customizing private information caching, go to [Private information caching](https://www.apollographql.com/docs/router/configuration/entity-caching/#private-information-caching) docs. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5574 \ No newline at end of file diff --git a/docs/source/configuration/entity-caching.mdx b/docs/source/configuration/entity-caching.mdx index 6d53a2ba7b..53aeb930f5 100644 --- a/docs/source/configuration/entity-caching.mdx +++ b/docs/source/configuration/entity-caching.mdx @@ -99,12 +99,15 @@ preview_entity_cache: ttl: 120s # overrides the global TTL inventory: enabled: false # disable for a specific subgraph + accounts: + private_id: "user_id" ``` ### Configure time to live (TTL) -Besides configuring a global TTL for all the entries in Redis, the GraphOS Router also honors the [`Cache-Control` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control) returned with the subgraph response. It generates a `Cache-Control` header for the client response by aggregating the TTL information from all response parts. -A TTL has to be configured for all subgraphs using entity caching, either defined in the per subgraph configuration or inherited from the global configuration. +To decide whether to cache an entity, the router honors the [`Cache-Control` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control) returned with the subgraph response. Because `Cache-Control` might not contain a `max-age` or `s-max-age` option, a default TTL must either be defined per subgraph configuration or inherited from the global configuration. 
+ +The router also generates a `Cache-Control` header for the client response by aggregating the TTL information from all response parts. If a subgraph doesn't return the header, its response is assumed to be `no-store`. ### Customize Redis cache key @@ -123,6 +126,118 @@ This entry contains an object with the `all` field to affect all subgraph reques ``` +### Private information caching + +A subgraph can return a response with the header `Cache-Control: private`, indicating that it contains user-personalized data. Although this usually forbids intermediate servers from storing data, the router may be able to recognize different users and store their data in different parts of the cache. + +To set up private information caching, you can configure the `private_id` option. `private_id` is a string pointing at a field in the request context that contains data used to recognize users (for example, user id, or `sub` claim in JWT). + +As an example, if you are using the router's JWT authentication plugin, you can first configure the `private_id` option in the `accounts` subgraph to point to the `user_id` key in context, then use a Rhai script to set that key from the JWT's `sub` claim: + +```yaml title="router.yaml" +preview_entity_cache: + enabled: true + subgraph: + all: + enabled: true + redis: + urls: ["redis://..."] + subgraphs: + accounts: + private_id: "user_id" +authentication: + router: + jwt: + jwks: + - url: https://auth-server/jwks.json +``` + +```rhai title="main.rhai" +fn supergraph_service(service) { + let request_callback = |request| { + let claims = request.context[Router.APOLLO_AUTHENTICATION_JWT_CLAIMS]; + + if claims != () { + let private_id = claims["sub"]; + request.context["user_id"] = private_id; + } + }; + + service.map_request(request_callback); +} +``` + +The router implements the following sequence to determine whether a particular query returns private data: + +- Upon seeing a query for the first time, the router requests the cache as if it 
were a public-only query. +- When the subgraph returns the response with private data, the router recognizes it and stores the data in a user-specific part of the cache. +- The router stores the query in a list of known queries with private data. +- When the router subsequently sees a known query: + - If the private id isn't provided, the router doesn't interrogate the cache, but it instead transmits the subgraph response directly. + - If the private id is provided, the router queries the part of the cache for the current user and checks the subgraph if nothing is available. + +### Observability + +The router supports a [`cache` selector](./telemetry/instrumentation/selectors#subgraph) in telemetry for the subgraph service. The selector returns the number of cache hits or misses by an entity for a subgraph request. + +## Spans + +You can add a new attribute on the subgraph span for the number of cache hits. For example: + +```yaml title="router.yaml" +telemetry: + instrumentation: + spans: + subgraph: + attributes: + cache.hit: + cache: hit +``` + +## Metrics + +The router provides the `telemetry.instrumentation.instruments.cache` instrument to enable cache metrics: + +```yaml title="router.yaml" +telemetry: + instrumentation: + instruments: + cache: # Cache instruments configuration + apollo.router.operations.entity.cache: # A counter which counts the number of cache hit and miss for subgraph requests + attributes: + entity.type: true # Include the entity type name. default: false + subgraph.name: # Custom attributes to include the subgraph name in the metric + subgraph_name: true + supergraph.operation.name: # Add custom attribute to display the supergraph operation name + supergraph_operation_name: string + # You can add more custom attributes using subgraph selectors +``` + +You can use custom instruments to create metrics for the subgraph service. 
The following example creates a custom instrument to generate a histogram that measures the subgraph request duration when there's at least one cache hit for the "inventory" subgraph: + +```yaml title="router.yaml" +telemetry: + instrumentation: + instruments: + subgraph: + only_cache_hit_on_subgraph_inventory: + type: histogram + value: duration + unit: hit + description: histogram of subgraph request duration when we have cache hit on subgraph inventory + condition: + all: + - eq: + - subgraph_name: true # subgraph selector + - inventory + - gt: # If the number of cache hit is greater than 0 + - cache: hit + # entity_type: Product # Here you could also only check for the entity type Product, it's `all` by default if we don't specify this config. + - 0 + +``` + + ## Implementation notes ### Cache-Control header requirement From 7b8d801b5f69334454f970d9ff77663545c42a02 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Wed, 14 Aug 2024 15:33:06 +0100 Subject: [PATCH 055/108] (bug) Trace propagation via header doesn't work (#5802) Co-authored-by: bryn Co-authored-by: Edward Huang --- .changesets/fix_bryn_remote_spans.md | 15 +++++++ .../src/plugins/telemetry/otel/layer.rs | 4 +- .../fixtures/trace_id_via_header.router.yaml | 33 +++++++++++++++ .../tests/integration/telemetry/mod.rs | 1 + .../integration/telemetry/propagation.rs | 40 +++++++++++++++++++ 5 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 .changesets/fix_bryn_remote_spans.md create mode 100644 apollo-router/tests/integration/telemetry/fixtures/trace_id_via_header.router.yaml create mode 100644 apollo-router/tests/integration/telemetry/propagation.rs diff --git a/.changesets/fix_bryn_remote_spans.md b/.changesets/fix_bryn_remote_spans.md new file mode 100644 index 0000000000..bc74c68ec5 --- /dev/null +++ b/.changesets/fix_bryn_remote_spans.md @@ -0,0 +1,15 @@ +### Fix trace propagation via header ([PR #5802](https://github.com/apollographql/router/pull/5802)) + +The router now correctly 
propagates trace IDs when using the `propagation.request.header_name` configuration option. + +```yaml + exporters: + tracing: + propagation: + request: + header_name: "id_from_header" +``` + +Previously, trace IDs weren't transferred to the root span of the request, causing spans to be incorrectly attributed to new traces. + +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5802 diff --git a/apollo-router/src/plugins/telemetry/otel/layer.rs b/apollo-router/src/plugins/telemetry/otel/layer.rs index 495d22f8ec..e1d20ec739 100644 --- a/apollo-router/src/plugins/telemetry/otel/layer.rs +++ b/apollo-router/src/plugins/telemetry/otel/layer.rs @@ -758,7 +758,9 @@ where let parent_cx = self.parent_context(attrs, &ctx); // Record new trace id if there is no active parent span - let trace_id = if parent_cx.span().span_context().is_valid() { + let trace_id = if parent_cx.span().span_context().is_valid() + || parent_cx.span().span_context().trace_id() != opentelemetry::trace::TraceId::INVALID + { // It probably means we have a remote parent trace parent_cx.span().span_context().trace_id() } else { diff --git a/apollo-router/tests/integration/telemetry/fixtures/trace_id_via_header.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/trace_id_via_header.router.yaml new file mode 100644 index 0000000000..a213522b36 --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/trace_id_via_header.router.yaml @@ -0,0 +1,33 @@ +telemetry: + + instrumentation: + + spans: + + mode: spec_compliant + router: + attributes: + # This should match the trace ID in the request + id_from_header: + trace_id: open_telemetry + events: + router: + # Standard events + request: info + + apollo: + field_level_instrumentation_sampler: always_off + exporters: + tracing: + propagation: + request: + header_name: "id_from_header" + logging: + stdout: + format: + text: + display_trace_id: true + display_span_id: true + ansi_escape_codes: 
false + display_current_span: true + diff --git a/apollo-router/tests/integration/telemetry/mod.rs b/apollo-router/tests/integration/telemetry/mod.rs index 0a31187c58..8df0a1d753 100644 --- a/apollo-router/tests/integration/telemetry/mod.rs +++ b/apollo-router/tests/integration/telemetry/mod.rs @@ -5,5 +5,6 @@ mod jaeger; mod logging; mod metrics; mod otlp; +mod propagation; #[cfg(any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux")))] mod zipkin; diff --git a/apollo-router/tests/integration/telemetry/propagation.rs b/apollo-router/tests/integration/telemetry/propagation.rs new file mode 100644 index 0000000000..e458f1986c --- /dev/null +++ b/apollo-router/tests/integration/telemetry/propagation.rs @@ -0,0 +1,40 @@ +use serde_json::json; +use tower::BoxError; + +use crate::integration::common::graph_os_enabled; +use crate::integration::common::IntegrationTest; +use crate::integration::common::Telemetry; + +#[tokio::test(flavor = "multi_thread")] +async fn test_trace_id_via_header() -> Result<(), BoxError> { + if !graph_os_enabled() { + eprintln!("test skipped"); + return Ok(()); + } + async fn make_call(router: &mut IntegrationTest, trace_id: &str) { + let _ = router.execute_query_with_headers(&json!({"query":"query {topProducts{name, name, name, name, name, name, name, name, name, name}}","variables":{}}), + [("id_from_header".to_string(), trace_id.to_string())].into()).await; + } + + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::None) + .config(include_str!("fixtures/trace_id_via_header.router.yaml")) + .build() + .await; + + let trace_id = "00000000000000000000000000000001"; + router.start().await; + router.assert_started().await; + make_call(&mut router, trace_id).await; + router + .assert_log_contains(&format!("trace_id: {}", trace_id)) + .await; + + make_call(&mut router, trace_id).await; + router + .assert_log_contains(&format!("\"id_from_header\": \"{}\"", trace_id)) + .await; + + router.graceful_shutdown().await; 
+ Ok(()) +} From 48a0623ad29487846153924df1cd7e39492dbb48 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Wed, 14 Aug 2024 15:33:50 +0100 Subject: [PATCH 056/108] Add `format` for trace ID propagation. (#5803) Co-authored-by: bryn Co-authored-by: Edward Huang --- .changesets/feat_propagation_format.md | 22 +++++++++ ...nfiguration__tests__schema_generation.snap | 5 ++ apollo-router/src/plugins/telemetry/config.rs | 3 ++ apollo-router/src/plugins/telemetry/mod.rs | 48 +++++++++++++++++-- .../telemetry/exporters/tracing/overview.mdx | 23 +++++++++ 5 files changed, 96 insertions(+), 5 deletions(-) create mode 100644 .changesets/feat_propagation_format.md diff --git a/.changesets/feat_propagation_format.md b/.changesets/feat_propagation_format.md new file mode 100644 index 0000000000..9aaad0efb2 --- /dev/null +++ b/.changesets/feat_propagation_format.md @@ -0,0 +1,22 @@ +### Add `format` for trace ID propagation. ([PR #5803](https://github.com/apollographql/router/pull/5803)) + +The router now supports specifying the format of trace IDs that are propagated to subgraphs via headers. + +You can configure the format with the `format` option: + +```yaml +telemetry: + exporters: + tracing: + propagation: + request: + header_name: "my_header" + # Must be in UUID form, with or without dashes + format: uuid +``` + +Note that incoming requests must be some form of UUID, either with or without dashes. + +To learn about supported formats, go to [`request` configuration reference](https://apollographql.com/docs/router/configuration/telemetry/exporters/tracing/overview#request-configuration-reference) docs. 
+ +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5803 diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 74294b07ba..e615c2b834 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -4625,12 +4625,17 @@ expression: "&schema" "RequestPropagation": { "additionalProperties": false, "properties": { + "format": { + "$ref": "#/definitions/TraceIdFormat", + "description": "#/definitions/TraceIdFormat" + }, "header_name": { "description": "Choose the header name to expose trace_id (default: apollo-trace-id)", "type": "string" } }, "required": [ + "format", "header_name" ], "type": "object" diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 42c6090b3b..5a506401a8 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -322,6 +322,9 @@ pub(crate) struct RequestPropagation { #[schemars(with = "String")] #[serde(deserialize_with = "deserialize_option_header_name")] pub(crate) header_name: Option, + + /// The trace ID format that will be used when propagating to subgraph services. 
+ pub(crate) format: TraceIdFormat, } #[derive(Debug, Clone, Deserialize, JsonSchema)] diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index c50f98bd00..6fea2af4b1 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -939,6 +939,7 @@ impl Telemetry { if let Some(from_request_header) = &propagation.request.header_name { propagators.push(Box::new(CustomTraceIdPropagator::new( from_request_header.to_string(), + propagation.request.format.clone(), ))); } @@ -2020,13 +2021,15 @@ fn store_ftv1(subgraph_name: &ByteString, resp: SubgraphResponse) -> SubgraphRes struct CustomTraceIdPropagator { header_name: String, fields: [String; 1], + format: TraceIdFormat, } impl CustomTraceIdPropagator { - fn new(header_name: String) -> Self { + fn new(header_name: String, format: TraceIdFormat) -> Self { Self { fields: [header_name.clone()], header_name, + format, } } @@ -2058,9 +2061,9 @@ impl TextMapPropagator for CustomTraceIdPropagator { fn inject_context(&self, cx: &opentelemetry::Context, injector: &mut dyn Injector) { let span = cx.span(); let span_context = span.span_context(); - if span_context.is_valid() { - let header_value = format!("{}", span_context.trace_id()); - injector.set(&self.header_name, header_value); + if span_context.trace_id() != TraceId::INVALID { + let formatted_trace_id = self.format.format(span_context.trace_id()); + injector.set(&self.header_name, formatted_trace_id); } } @@ -2130,6 +2133,14 @@ mod tests { use http::StatusCode; use insta::assert_snapshot; use itertools::Itertools; + use opentelemetry_api::propagation::Injector; + use opentelemetry_api::propagation::TextMapPropagator; + use opentelemetry_api::trace::SpanContext; + use opentelemetry_api::trace::SpanId; + use opentelemetry_api::trace::TraceContextExt; + use opentelemetry_api::trace::TraceFlags; + use opentelemetry_api::trace::TraceId; + use opentelemetry_api::trace::TraceState; use 
serde_json::Value; use serde_json_bytes::json; use serde_json_bytes::ByteString; @@ -2159,6 +2170,7 @@ mod tests { use crate::plugin::test::MockSubgraphService; use crate::plugin::test::MockSupergraphService; use crate::plugin::DynPlugin; + use crate::plugins::telemetry::config::TraceIdFormat; use crate::plugins::telemetry::handle_error_internal; use crate::services::router::body::get_body_bytes; use crate::services::RouterRequest; @@ -3195,11 +3207,37 @@ mod tests { let trace_id = String::from("04f9e396-465c-4840-bc2b-f493b8b1a7fc"); let expected_trace_id = String::from("04f9e396465c4840bc2bf493b8b1a7fc"); - let propagator = CustomTraceIdPropagator::new(header.clone()); + let propagator = CustomTraceIdPropagator::new(header.clone(), TraceIdFormat::Uuid); let mut headers: HashMap = HashMap::new(); headers.insert(header, trace_id); let span = propagator.extract_span_context(&headers); assert!(span.is_some()); assert_eq!(span.unwrap().trace_id().to_string(), expected_trace_id); } + + #[test] + fn test_header_propagation_format() { + struct Injected(HashMap); + impl Injector for Injected { + fn set(&mut self, key: &str, value: String) { + self.0.insert(key.to_string(), value); + } + } + let mut injected = Injected(HashMap::new()); + let _ctx = opentelemetry::Context::new() + .with_remote_span_context(SpanContext::new( + TraceId::from_u128(0x04f9e396465c4840bc2bf493b8b1a7fc), + SpanId::INVALID, + TraceFlags::default(), + false, + TraceState::default(), + )) + .attach(); + let propagator = CustomTraceIdPropagator::new("my_header".to_string(), TraceIdFormat::Uuid); + propagator.inject_context(&opentelemetry::Context::current(), &mut injected); + assert_eq!( + injected.0.get("my_header").unwrap(), + "04f9e396-465c-4840-bc2b-f493b8b1a7fc" + ); + } } diff --git a/docs/source/configuration/telemetry/exporters/tracing/overview.mdx b/docs/source/configuration/telemetry/exporters/tracing/overview.mdx index 1bab45a2c2..c7b81cca70 100644 --- 
a/docs/source/configuration/telemetry/exporters/tracing/overview.mdx +++ b/docs/source/configuration/telemetry/exporters/tracing/overview.mdx @@ -144,9 +144,32 @@ telemetry: # If you have your own way to generate a trace id and you want to pass it via a custom request header request: + # The name of the header to read the trace id from header_name: my-trace-id + # The format of the trace when propagating to subgraphs. + format: uuid ``` +#### `request` configuration reference + +| Option | Values | Default | Description | +|---------------|---------------------------------------------------------------|-----------------------------------|-------------------------------------| +| `header_name` | | | The name of the http header to use for propagation. | +| `format` | `hexadecimal`\|`open_telemetry`\|`decimal`\|`datadog`\|`uuid` | `hexadecimal` | The output format of the `trace_id` | + +Valid values for `format`: +* `hexadecimal` - 32-character hexadecimal string (e.g. `0123456789abcdef0123456789abcdef`) +* `open_telemetry` - 32-character hexadecimal string (e.g. `0123456789abcdef0123456789abcdef`) +* `decimal` - 16-character decimal string (e.g. `1234567890123456`) +* `datadog` - 16-character decimal string (e.g. `1234567890123456`) +* `uuid` - 36-character UUID string (e.g. `01234567-89ab-cdef-0123-456789abcdef`) + + + +Incoming trace IDs must be in `open_telemetry` or `uuid` format. + + + ### Limits You may set limits on spans to prevent sending too much data to your APM. 
For example: From 9d17023d86b98fdfd71421cafe5a500470193644 Mon Sep 17 00:00:00 2001 From: Taylor Ninesling Date: Wed, 14 Aug 2024 08:14:23 -0700 Subject: [PATCH 057/108] Ignore non-apollo directives when extracting demand control directives to subgraphs (#5782) --- .../src/link/cost_spec_definition.rs | 31 ++- .../extract_subgraphs_from_supergraph.rs | 26 ++- apollo-federation/tests/extract_subgraphs.rs | 186 ++++++++++++++++++ ...mand_control_directive_name_conflicts.snap | 141 +++++++++++++ ...mand_control_directive_name_conflicts.snap | 141 +++++++++++++ 5 files changed, 503 insertions(+), 22 deletions(-) create mode 100644 apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_demand_control_directive_name_conflicts.snap create mode 100644 apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_renamed_demand_control_directive_name_conflicts.snap diff --git a/apollo-federation/src/link/cost_spec_definition.rs b/apollo-federation/src/link/cost_spec_definition.rs index 38e1f94619..b3c2b09615 100644 --- a/apollo-federation/src/link/cost_spec_definition.rs +++ b/apollo-federation/src/link/cost_spec_definition.rs @@ -44,11 +44,8 @@ macro_rules! propagate_demand_control_directives { original_directive_names: &HashMap, ) -> Result<(), FederationError> { let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); - if let Some(cost_directive) = source.get( - cost_directive_name - .unwrap_or(&COST_DIRECTIVE_NAME_IN_SPEC) - .as_str(), - ) { + let cost_directive = cost_directive_name.and_then(|name| source.get(name.as_str())); + if let Some(cost_directive) = cost_directive { dest.push($wrap_ty(self.cost_directive( subgraph_schema, cost_directive.arguments.clone(), @@ -57,11 +54,9 @@ macro_rules! 
propagate_demand_control_directives { let list_size_directive_name = original_directive_names.get(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC); - if let Some(list_size_directive) = source.get( - list_size_directive_name - .unwrap_or(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC) - .as_str(), - ) { + let list_size_directive = + list_size_directive_name.and_then(|name| source.get(name.as_str())); + if let Some(list_size_directive) = list_size_directive { dest.push($wrap_ty(self.list_size_directive( subgraph_schema, list_size_directive.arguments.clone(), @@ -83,11 +78,9 @@ macro_rules! propagate_demand_control_directives_to_position { original_directive_names: &HashMap, ) -> Result<(), FederationError> { let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); - if let Some(cost_directive) = source.directives.get( - cost_directive_name - .unwrap_or(&COST_DIRECTIVE_NAME_IN_SPEC) - .as_str(), - ) { + let cost_directive = + cost_directive_name.and_then(|name| source.directives.get(name.as_str())); + if let Some(cost_directive) = cost_directive { dest.insert_directive( subgraph_schema, Component::from( @@ -98,11 +91,9 @@ macro_rules! 
propagate_demand_control_directives_to_position { let list_size_directive_name = original_directive_names.get(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC); - if let Some(list_size_directive) = source.directives.get( - list_size_directive_name - .unwrap_or(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC) - .as_str(), - ) { + let list_size_directive = + list_size_directive_name.and_then(|name| source.directives.get(name.as_str())); + if let Some(list_size_directive) = list_size_directive { dest.insert_directive( subgraph_schema, Component::from(self.list_size_directive( diff --git a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs index 23b1b7fbe4..c714cc21b6 100644 --- a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs +++ b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs @@ -50,6 +50,7 @@ use crate::link::join_spec_definition::JoinSpecDefinition; use crate::link::join_spec_definition::TypeDirectiveArguments; use crate::link::spec::Identity; use crate::link::spec::Version; +use crate::link::spec::APOLLO_SPEC_DOMAIN; use crate::link::spec_definition::SpecDefinition; use crate::link::Link; use crate::link::DEFAULT_LINK_NAME; @@ -308,13 +309,34 @@ struct TypeInfos { input_object_types: Vec, } -fn get_original_directive_names( +/// Builds a map of original name to new name for Apollo feature directives. This is +/// used to handle cases where a directive is renamed via an import statement. For +/// example, importing a directive with a custom name like +/// ```graphql +/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: [{ name: "@cost", as: "@renamedCost" }]) +/// ``` +/// results in a map entry of `cost -> renamedCost` with the `@` prefix removed. +/// +/// If the directive is imported under its default name, that also results in an entry. 
So, +/// ```graphql +/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@cost"]) +/// ``` +/// results in a map entry of `cost -> cost`. This duals as a way to check if a directive +/// is included in the supergraph schema. +/// +/// **Important:** This map does _not_ include directives imported from identities other +/// than `specs.apollo.dev`. This helps us avoid extracting directives to subgraphs +/// when a custom directive's name conflicts with that of a default one. +fn get_apollo_directive_names( supergraph_schema: &FederationSchema, ) -> Result, FederationError> { let mut hm: HashMap = HashMap::new(); for directive in &supergraph_schema.schema().schema_definition.directives { if directive.name.as_str() == "link" { if let Ok(link) = Link::from_directive_application(directive) { + if link.url.identity.domain != APOLLO_SPEC_DOMAIN { + continue; + } for import in link.imports { hm.insert(import.element.clone(), import.imported_name().clone()); } @@ -332,7 +354,7 @@ fn extract_subgraphs_from_fed_2_supergraph( join_spec_definition: &'static JoinSpecDefinition, filtered_types: &Vec, ) -> Result<(), FederationError> { - let original_directive_names = get_original_directive_names(supergraph_schema)?; + let original_directive_names = get_apollo_directive_names(supergraph_schema)?; let TypeInfos { object_types, diff --git a/apollo-federation/tests/extract_subgraphs.rs b/apollo-federation/tests/extract_subgraphs.rs index a3316e895d..2148185184 100644 --- a/apollo-federation/tests/extract_subgraphs.rs +++ b/apollo-federation/tests/extract_subgraphs.rs @@ -511,3 +511,189 @@ fn extracts_renamed_demand_control_directives() { } insta::assert_snapshot!(snapshot); } + +#[test] +fn does_not_extract_demand_control_directive_name_conflicts() { + let subgraphs = Supergraph::new(r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://example.com/myCustomDirective/v1.0", 
import: ["@cost"]) + @link(url: "https://example.com/myOtherCustomDirective/v1.0", import: ["@listSize"]) + { + query: Query + } + + directive @cost(name: String!) on FIELD_DEFINITION | SCALAR + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @listSize(name: String!) on FIELD_DEFINITION + + input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPH_A @join__graph(name: "subgraph-a", url: "") + SUBGRAPH_B @join__graph(name: "subgraph-b", url: "") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION + } + + scalar ExpensiveInt @cost(name: "expensiveInt") + @join__type(graph: SUBGRAPH_A) + + type Query + @join__type(graph: SUBGRAPH_A) + @join__type(graph: SUBGRAPH_B) + { + a: ExpensiveInt @join__field(graph: SUBGRAPH_A) @cost(name: "cost") + b: [Int] @join__field(graph: SUBGRAPH_B) @listSize(name: "listSize") + } + "#) + .expect("parses") + .extract_subgraphs() + .expect("extracts"); + + let mut snapshot = String::new(); + for (_name, subgraph) in subgraphs { + use std::fmt::Write; + + _ = writeln!( + &mut snapshot, + "{}\n---\n{}", + subgraph.name, + subgraph.schema.schema() + ); + } + insta::assert_snapshot!(snapshot); +} + +#[test] +fn does_not_extract_renamed_demand_control_directive_name_conflicts() { + let subgraphs = Supergraph::new(r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://example.com/myCustomDirective/v1.0", import: [{name: "@cost", as: "@renamedCost"}]) + @link(url: "https://example.com/myOtherCustomDirective/v1.0", import: [{name: "@listSize", as: "@renamedListSize"}]) + { + query: Query + } + + directive @renamedCost(name: String!) on FIELD_DEFINITION | SCALAR + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) 
repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @renamedListSize(name: String!) on FIELD_DEFINITION + + input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPH_A @join__graph(name: "subgraph-a", url: "") + SUBGRAPH_B @join__graph(name: "subgraph-b", url: "") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION + } + + scalar ExpensiveInt @renamedCost(name: "expensiveInt") + @join__type(graph: SUBGRAPH_A) + + type Query + @join__type(graph: SUBGRAPH_A) + @join__type(graph: SUBGRAPH_B) + { + a: ExpensiveInt @join__field(graph: SUBGRAPH_A) @renamedCost(name: "cost") + b: [Int] @join__field(graph: SUBGRAPH_B) @renamedListSize(name: "listSize") + } + "#) + .expect("parses") + .extract_subgraphs() + .expect("extracts"); + + let mut snapshot = String::new(); + for (_name, subgraph) in subgraphs { + use std::fmt::Write; + + _ = writeln!( + &mut snapshot, + "{}\n---\n{}", + subgraph.name, + subgraph.schema.schema() + ); + } + insta::assert_snapshot!(snapshot); +} diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_demand_control_directive_name_conflicts.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_demand_control_directive_name_conflicts.snap new file mode 100644 index 0000000000..f86e759fca --- /dev/null +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_demand_control_directive_name_conflicts.snap @@ -0,0 +1,141 @@ +--- +source: apollo-federation/tests/extract_subgraphs.rs +expression: snapshot +--- +subgraph-a +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +scalar ExpensiveInt + +type Query { + a: ExpensiveInt + _service: _Service! 
+} + +scalar _Any + +type _Service { + sdl: String +} + +subgraph-b +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) 
on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type Query { + b: [Int] + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_renamed_demand_control_directive_name_conflicts.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_renamed_demand_control_directive_name_conflicts.snap new file mode 100644 index 0000000000..f86e759fca --- /dev/null +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_renamed_demand_control_directive_name_conflicts.snap @@ -0,0 +1,141 @@ +--- +source: apollo-federation/tests/extract_subgraphs.rs +expression: snapshot +--- +subgraph-a +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +scalar ExpensiveInt + +type Query { + a: ExpensiveInt + _service: _Service! 
+} + +scalar _Any + +type _Service { + sdl: String +} + +subgraph-b +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) 
on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type Query { + b: [Int] + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} From 26e7dbf056bb3a73cdb5e04a472b330c1504931f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Thu, 15 Aug 2024 16:52:56 +0200 Subject: [PATCH 058/108] fix: make new `tracing.propagation.format` config optional (#5824) --- .../apollo_router__configuration__tests__schema_generation.snap | 1 - apollo-router/src/plugins/telemetry/config.rs | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index e615c2b834..37a2a352b9 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -4635,7 +4635,6 @@ expression: "&schema" } }, "required": [ - "format", "header_name" ], "type": "object" diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 5a506401a8..797c838ed0 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -324,6 +324,7 @@ pub(crate) struct RequestPropagation { pub(crate) header_name: Option, 
/// The trace ID format that will be used when propagating to subgraph services. + #[serde(default)] pub(crate) format: TraceIdFormat, } From d564b3308dd5ea54f51b637ef3ab640e7210835c Mon Sep 17 00:00:00 2001 From: Tyler Bloom Date: Thu, 15 Aug 2024 11:49:06 -0400 Subject: [PATCH 059/108] Fixed bugs in subgraph jump minimizing optimization (#5820) --- .../src/query_graph/graph_path.rs | 100 +++++++++--------- ...debug_max_evaluated_plans_configuration.rs | 30 +++--- .../allows_setting_down_to_1.graphql | 2 +- ...root_when_a_more_direct_one_exists.graphql | 2 +- .../can_be_set_to_an_arbitrary_number.graphql | 2 +- ...ndividually_optimal_branch_options.graphql | 2 +- ...e_complex_fetch_group_dependencies.graphql | 2 +- ...key_field_to_fetch_that_same_field.graphql | 2 +- ...erflow_in_reduce_options_if_needed.graphql | 2 +- .../supergraphs/works_when_unset.graphql | 2 +- 10 files changed, 71 insertions(+), 75 deletions(-) diff --git a/apollo-federation/src/query_graph/graph_path.rs b/apollo-federation/src/query_graph/graph_path.rs index ea45c00d1d..02e40205ee 100644 --- a/apollo-federation/src/query_graph/graph_path.rs +++ b/apollo-federation/src/query_graph/graph_path.rs @@ -880,6 +880,11 @@ where let mut edges = self.edges.clone(); let mut edge_triggers = self.edge_triggers.clone(); let mut edge_conditions = self.edge_conditions.clone(); + let mut last_subgraph_entering_edge_info = if defer.is_none() { + self.last_subgraph_entering_edge_info.clone() + } else { + None + }; let Some(new_edge) = edge.into() else { edges.push(edge); @@ -895,11 +900,7 @@ where // We clear `last_subgraph_entering_edge_info` as we enter a `@defer`. That is // because `last_subgraph_entering_edge_info` is used to eliminate some non-optimal // paths, but we don't want those optimizations to bypass a `@defer` application. 
- last_subgraph_entering_edge_info: if defer.is_some() { - None - } else { - self.last_subgraph_entering_edge_info.clone() - }, + last_subgraph_entering_edge_info, own_path_ids: self.own_path_ids.clone(), overriding_path_ids: self.overriding_path_ids.clone(), runtime_types_of_tail: Arc::new( @@ -1077,6 +1078,12 @@ where edges.push(edge); edge_triggers.push(Arc::new(trigger)); edge_conditions.push(condition_path_tree); + if defer.is_none() && self.graph.is_cross_subgraph_edge(new_edge)? { + last_subgraph_entering_edge_info = Some(SubgraphEnteringEdgeInfo { + index: self.edges.len() - 1, + conditions_cost: condition_cost, + }); + } return Ok(GraphPath { graph: self.graph.clone(), head: self.head, @@ -1089,16 +1096,7 @@ where // // PORT_NOTE: In the JS codebase, the information for the last subgraph-entering // is set incorrectly, in that the index is off by one. We fix that bug here. - last_subgraph_entering_edge_info: if defer.is_none() - && self.graph.is_cross_subgraph_edge(new_edge)? - { - Some(SubgraphEnteringEdgeInfo { - index: self.edges.len() - 1, - conditions_cost: condition_cost, - }) - } else { - None - }, + last_subgraph_entering_edge_info, own_path_ids: self.own_path_ids.clone(), overriding_path_ids: self.overriding_path_ids.clone(), runtime_types_of_tail: Arc::new(self.graph.advance_possible_runtime_types( @@ -1115,6 +1113,12 @@ where edges.push(edge); edge_triggers.push(Arc::new(trigger)); edge_conditions.push(condition_path_tree); + if defer.is_none() && self.graph.is_cross_subgraph_edge(new_edge)? { + last_subgraph_entering_edge_info = Some(SubgraphEnteringEdgeInfo { + index: self.edges.len(), + conditions_cost: condition_cost, + }); + } Ok(GraphPath { graph: self.graph.clone(), head: self.head, @@ -1124,16 +1128,7 @@ where edge_conditions, // Again, we don't want to set `last_subgraph_entering_edge_info` if we're entering a // `@defer` (see above). 
- last_subgraph_entering_edge_info: if defer.is_none() - && self.graph.is_cross_subgraph_edge(new_edge)? - { - Some(SubgraphEnteringEdgeInfo { - index: self.edges.len(), - conditions_cost: condition_cost, - }) - } else { - None - }, + last_subgraph_entering_edge_info, own_path_ids: self.own_path_ids.clone(), overriding_path_ids: self.overriding_path_ids.clone(), runtime_types_of_tail: Arc::new( @@ -1461,10 +1456,12 @@ where heap.push(HeapElement(self.clone())); while let Some(HeapElement(to_advance)) = heap.pop() { - let span = debug_span!("From {to_advance:?}"); + debug!("From {to_advance:?}"); + let span = debug_span!(" |"); let _guard = span.enter(); for edge in to_advance.next_edges()? { - let span = debug_span!("Testing edge {edge:?}"); + debug!("Testing edge {edge:?}"); + let span = debug_span!(" |"); let _guard = span.enter(); let edge_weight = self.graph.edge_weight(edge)?; if edge_weight.transition.collect_operation_elements() { @@ -1543,7 +1540,8 @@ where continue; } - let span = debug_span!("Validating conditions {edge_weight}"); + debug!("Validating conditions {edge_weight}"); + let span = debug_span!(" |"); let guard = span.enter(); // As we validate the condition for this edge, it might be necessary to jump to // another subgraph, but if for that we need to jump to the same subgraph we're @@ -1659,7 +1657,7 @@ where let last_subgraph_entering_edge_head_weight = self.graph.node_weight(last_subgraph_entering_edge_head)?; last_subgraph_entering_edge_head_weight.source - == last_subgraph_entering_edge_tail_weight.source + == edge_tail_weight.source }; let direct_path_end_node = @@ -1671,7 +1669,7 @@ where "Edge tail is unexpectedly a federated root", )); }; - self.check_direct_path_from_node( + to_advance.check_direct_path_from_node( last_subgraph_entering_edge_info.index + 1, direct_path_start_node, edge_tail_type_pos, @@ -1750,7 +1748,6 @@ where edge_tail_weight.source.clone(), Some((updated_path.clone(), cost)), ); - debug!("Using edge, advance path: 
{updated_path:?}"); // It can be necessary to "chain" keys, because different subgraphs may have // different keys exposed, and so we when we took a key, we want to check if // there is a new key we can now use that takes us to other subgraphs. For other @@ -2631,10 +2628,9 @@ impl OpGraphPath { debug!("Casting into requested type {field_parent_pos}"); Arc::new(IndexSet::from_iter([field_parent_pos.clone()])) } else { - if interface_path.is_some() { - debug!("No direct edge: type exploding interface {tail_weight} into possible runtime types {:?}", self.runtime_types_of_tail); - } else { - debug!("Type exploding interface {tail_weight} into possible runtime types {:?} as 2nd option", self.runtime_types_of_tail); + match &interface_path { + Some(_) => debug!("No direct edge: type exploding interface {tail_weight} into possible runtime types {:?}", self.runtime_types_of_tail), + None => debug!("Type exploding interface {tail_weight} into possible runtime types {:?} as 2nd option", self.runtime_types_of_tail), } self.runtime_types_of_tail.clone() }; @@ -2644,8 +2640,8 @@ impl OpGraphPath { // any gives us empty options, we bail. let mut options_for_each_implementation = vec![]; for implementation_type_pos in implementations.as_ref() { - let span = - debug_span!("Handling implementation {implementation_type_pos}"); + debug!("Handling implementation {implementation_type_pos}"); + let span = debug_span!(" |"); let guard = span.enter(); let implementation_inline_fragment = InlineFragment::new(InlineFragmentData { @@ -3094,7 +3090,7 @@ impl OpGraphPath { // account (it may very well be that whatever comes after `u` is not in `A`, for instance). 
let self_tail_weight = self.graph.node_weight(self.tail)?; let other_tail_weight = self.graph.node_weight(other.tail)?; - if self_tail_weight.source == other_tail_weight.source { + if self_tail_weight.source != other_tail_weight.source { // As described above, we want to know if one of the paths has no jumps at all (after // the common prefix) while the other has some. self.compare_subgraph_jumps_after_last_common_node(other) @@ -3236,9 +3232,9 @@ impl SimultaneousPaths { match (self.0.as_slice(), other.0.as_slice()) { ([a], [b]) => a.compare_single_path_options_complexity_out_of_context(b), ([a], _) => a.compare_single_vs_multi_path_options_complexity_out_of_context(other), - (_, [b]) => Ok(b - .compare_single_vs_multi_path_options_complexity_out_of_context(self)? - .reverse()), + (_, [b]) => b + .compare_single_vs_multi_path_options_complexity_out_of_context(self) + .map(Ordering::reverse), _ => Ok(Ordering::Equal), } } @@ -3349,8 +3345,12 @@ impl SimultaneousPathsWithLazyIndirectPaths { operation_element: &OpPathElement, condition_resolver: &mut impl ConditionResolver, ) -> Result>, FederationError> { - let span = debug_span!("Trying to advance paths for operation", paths = %self.paths, operation = %operation_element); - let _gaurd = span.enter(); + debug!( + "Trying to advance paths for operation: path = {}, operation = {operation_element}", + self.paths + ); + let span = debug_span!(" |"); + let _guard = span.enter(); let updated_context = self.context.with_context_of(operation_element)?; let mut options_for_each_path = vec![]; @@ -3358,13 +3358,15 @@ impl SimultaneousPathsWithLazyIndirectPaths { // references to `self`, which means cloning these paths when iterating. 
let paths = self.paths.0.clone(); for (path_index, path) in paths.iter().enumerate() { - let span = debug_span!("Computing options for {path}"); + debug!("Computing options for {path}"); + let span = debug_span!(" |"); let gaurd = span.enter(); let mut options = None; let should_reenter_subgraph = path.defer_on_tail.is_some() && matches!(operation_element, OpPathElement::Field(_)); if !should_reenter_subgraph { - let span = debug_span!("Direct options"); + debug!("Direct options"); + let span = debug_span!(" |"); let gaurd = span.enter(); let (advance_options, has_only_type_exploded_results) = path .advance_with_operation_element( @@ -3417,8 +3419,6 @@ impl SimultaneousPathsWithLazyIndirectPaths { // defer), that's ok, we'll just try with non-collecting edges. let mut options = options.unwrap_or_else(Vec::new); if let OpPathElement::Field(operation_field) = operation_element { - let span = debug_span!("Computing indirect paths:"); - let _gaurd = span.enter(); // Add whatever options can be obtained by taking some non-collecting edges first. let paths_with_non_collecting_edges = self .indirect_options(&updated_context, path_index, condition_resolver)? 
@@ -3428,13 +3428,11 @@ impl SimultaneousPathsWithLazyIndirectPaths { "{} indirect paths", paths_with_non_collecting_edges.paths.len() ); - let span = debug_span!("Validating indirect options:"); - let _gaurd = span.enter(); for paths_with_non_collecting_edges in paths_with_non_collecting_edges.paths.iter() { - let span = - debug_span!("For indirect path {paths_with_non_collecting_edges}:"); + debug!("For indirect path {paths_with_non_collecting_edges}:"); + let span = debug_span!(" |"); let _gaurd = span.enter(); let (advance_options, _) = paths_with_non_collecting_edges .advance_with_operation_element( diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/debug_max_evaluated_plans_configuration.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/debug_max_evaluated_plans_configuration.rs index 690ad722a7..3147c44fa7 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/debug_max_evaluated_plans_configuration.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/debug_max_evaluated_plans_configuration.rs @@ -15,7 +15,7 @@ const SUBGRAPH: &str = r#" type Query { t: T @shareable } - + type T @key(fields: "id") @shareable { id: ID! v1: Int @@ -200,7 +200,7 @@ fn correctly_generate_plan_built_from_some_non_individually_optimal_branch_optio type Query { t: T @shareable } - + type T { x: Int @shareable } @@ -209,7 +209,7 @@ fn correctly_generate_plan_built_from_some_non_individually_optimal_branch_optio type Query { t: T @shareable } - + type T @key(fields: "id") { id: ID! } @@ -276,7 +276,7 @@ fn does_not_error_on_some_complex_fetch_group_dependencies() { type Query { me: User @shareable } - + type User { id: ID! @shareable } @@ -285,12 +285,12 @@ fn does_not_error_on_some_complex_fetch_group_dependencies() { type Query { me: User @shareable } - + type User @key(fields: "id") { id: ID! p: Props } - + type Props { id: ID! 
@shareable } @@ -299,29 +299,29 @@ fn does_not_error_on_some_complex_fetch_group_dependencies() { type Query { me: User @shareable } - + type User { id: ID! @shareable } - + type Props @key(fields: "id") { id: ID! v0: Int t: T } - + type T { id: ID! v1: V v2: V - + # Note: this field is not queried, but matters to the reproduction this test exists # for because it prevents some optimizations that would happen without it (namely, # without it, the planner would notice that everything after type T is guaranteed # to be local to the subgraph). user: User } - + type V { x: Int } @@ -396,7 +396,7 @@ fn does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field() { type Query { t: T } - + type T @key(fields: "otherId") { otherId: ID! } @@ -468,8 +468,6 @@ fn does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field() { } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure fn avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists() { // Each of id/v0 can have 2 options each, so that's 4 combinations. If we were to consider 2 options for each // v1 value however, that would multiple it by 2 each times, so it would 32 possibilities. We limit the number of @@ -487,7 +485,7 @@ fn avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists( type Query { t: T @shareable } - + type T @key(fields: "id") { id: ID! v0: Int @shareable @@ -497,7 +495,7 @@ fn avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists( type Query { t: T @shareable } - + type T @key(fields: "id") { id: ID! 
v0: Int @shareable diff --git a/apollo-federation/tests/query_plan/supergraphs/allows_setting_down_to_1.graphql b/apollo-federation/tests/query_plan/supergraphs/allows_setting_down_to_1.graphql index 7abb1b3fa0..0b6baf2e68 100644 --- a/apollo-federation/tests/query_plan/supergraphs/allows_setting_down_to_1.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/allows_setting_down_to_1.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fd2cfde36cc3d0a981e6c3636aaeea3a6aad4424 +# Composed from subgraphs with hash: bf831e2e6890f60e5c8e93bc52ce549323cb23e8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists.graphql b/apollo-federation/tests/query_plan/supergraphs/avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists.graphql index 8c7da0d906..2a7e9c07f9 100644 --- a/apollo-federation/tests/query_plan/supergraphs/avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: b4f21df2efd31ed379be10cafbb341c080179593 +# Composed from subgraphs with hash: 995342f0aeb7c35ebe233102083b817ae5d9b0a8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/can_be_set_to_an_arbitrary_number.graphql b/apollo-federation/tests/query_plan/supergraphs/can_be_set_to_an_arbitrary_number.graphql index 7abb1b3fa0..0b6baf2e68 100644 --- a/apollo-federation/tests/query_plan/supergraphs/can_be_set_to_an_arbitrary_number.graphql +++ 
b/apollo-federation/tests/query_plan/supergraphs/can_be_set_to_an_arbitrary_number.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fd2cfde36cc3d0a981e6c3636aaeea3a6aad4424 +# Composed from subgraphs with hash: bf831e2e6890f60e5c8e93bc52ce549323cb23e8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/correctly_generate_plan_built_from_some_non_individually_optimal_branch_options.graphql b/apollo-federation/tests/query_plan/supergraphs/correctly_generate_plan_built_from_some_non_individually_optimal_branch_options.graphql index 0a4b8a4af1..2766e3b307 100644 --- a/apollo-federation/tests/query_plan/supergraphs/correctly_generate_plan_built_from_some_non_individually_optimal_branch_options.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/correctly_generate_plan_built_from_some_non_individually_optimal_branch_options.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: 86f4ab2b6c51a81a9ecc193fdf764487cb8c7ac8 +# Composed from subgraphs with hash: 38b15e780cba3d9d7cb6288e027386e3d612102a schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/does_not_error_on_some_complex_fetch_group_dependencies.graphql b/apollo-federation/tests/query_plan/supergraphs/does_not_error_on_some_complex_fetch_group_dependencies.graphql index 9cef2b06fc..dbc5271859 100644 --- a/apollo-federation/tests/query_plan/supergraphs/does_not_error_on_some_complex_fetch_group_dependencies.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/does_not_error_on_some_complex_fetch_group_dependencies.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fb4a75fb881e3766651b083f97e1ec452f842582 +# Composed from subgraphs with hash: f4f751d2b348c0947b2f1dbca4cea1c987ff7d02 schema @link(url: 
"https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field.graphql b/apollo-federation/tests/query_plan/supergraphs/does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field.graphql index 5aa9f72b5d..a55159cae8 100644 --- a/apollo-federation/tests/query_plan/supergraphs/does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: 1d139caaa150bb8da0e7fe34dba387685d526c41 +# Composed from subgraphs with hash: 15c059c34b90d54a9c27d2ad67c89307a1280a1f schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/multiplication_overflow_in_reduce_options_if_needed.graphql b/apollo-federation/tests/query_plan/supergraphs/multiplication_overflow_in_reduce_options_if_needed.graphql index 7abb1b3fa0..0b6baf2e68 100644 --- a/apollo-federation/tests/query_plan/supergraphs/multiplication_overflow_in_reduce_options_if_needed.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/multiplication_overflow_in_reduce_options_if_needed.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fd2cfde36cc3d0a981e6c3636aaeea3a6aad4424 +# Composed from subgraphs with hash: bf831e2e6890f60e5c8e93bc52ce549323cb23e8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/works_when_unset.graphql b/apollo-federation/tests/query_plan/supergraphs/works_when_unset.graphql index 7abb1b3fa0..0b6baf2e68 100644 --- 
a/apollo-federation/tests/query_plan/supergraphs/works_when_unset.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/works_when_unset.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fd2cfde36cc3d0a981e6c3636aaeea3a6aad4424 +# Composed from subgraphs with hash: bf831e2e6890f60e5c8e93bc52ce549323cb23e8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) From b6060519ace8e29edb0c2cc1ec368e17fe39b38e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Thu, 15 Aug 2024 17:52:03 +0200 Subject: [PATCH 060/108] chore(federation): generate the same fragment names as JS (#5821) --- apollo-federation/src/operation/optimize.rs | 44 ++++++++++++++++- .../fragment_autogeneration.rs | 49 ++++++++++--------- 2 files changed, 68 insertions(+), 25 deletions(-) diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index 3590c34f1e..de72f6d74c 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -1624,6 +1624,8 @@ fn fragment_name(mut index: usize) -> Name { #[derive(Debug, Default)] struct FragmentGenerator { fragments: NamedFragments, + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + names: HashMap<(String, usize), usize>, } impl FragmentGenerator { @@ -1631,6 +1633,33 @@ impl FragmentGenerator { fragment_name(self.fragments.len()) } + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // In the future, we will just use `.next_name()`. 
+ fn generate_name(&mut self, frag: &InlineFragmentSelection) -> Name { + use std::fmt::Write as _; + + let type_condition = frag + .inline_fragment + .type_condition_position + .as_ref() + .map_or_else( + || "undefined".to_string(), + |condition| condition.to_string(), + ); + let selections = frag.selection_set.selections.len(); + let mut name = format!("_generated_on{type_condition}_{selections}"); + + let key = (type_condition, selections); + let index = self + .names + .entry(key) + .and_modify(|index| *index += 1) + .or_default(); + _ = write!(&mut name, "_{index}"); + + Name::new_unchecked(&name) + } + /// Is a selection set worth using for a newly generated named fragment? fn is_worth_using(selection_set: &SelectionSet) -> bool { let mut iter = selection_set.iter(); @@ -1697,6 +1726,17 @@ impl FragmentGenerator { continue; }; + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // JS does not special-case @skip and @include. It never extracts a fragment if + // there's any directives on it. This code duplicates the body from the + // previous condition so it's very easy to remove when we're ready :) + if !skip_include.is_empty() { + new_selection_set.add_local_selection(&Selection::InlineFragment( + Arc::clone(candidate.get()), + ))?; + continue; + } + let existing = self.fragments.iter().find(|existing| { existing.type_condition_position == candidate.get().inline_fragment.casted_type() @@ -1706,7 +1746,9 @@ impl FragmentGenerator { let existing = if let Some(existing) = existing { existing } else { - let name = self.next_name(); + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // This should be reverted to `self.next_name();` when we're ready. 
+ let name = self.generate_name(candidate.get()); self.fragments.insert(Fragment { schema: selection_set.schema.clone(), name: name.clone(), diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs index 42e13f473d..69d4d4e5b3 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs @@ -53,14 +53,14 @@ fn it_respects_generate_query_fragments_option() { { t { __typename - ...a + ..._generated_onA_2_0 ... on B { z } } } - fragment a on A { + fragment _generated_onA_2_0 on A { x y } @@ -105,21 +105,21 @@ fn it_handles_nested_fragment_generation() { { t { __typename - ...b + ..._generated_onA_3_0 } } - fragment a on A { + fragment _generated_onA_2_0 on A { x y } - fragment b on A { + fragment _generated_onA_3_0 on A { x y t { __typename - ...a + ..._generated_onA_2_0 ... on B { z } @@ -159,11 +159,11 @@ fn it_handles_fragments_with_one_non_leaf_field() { { t { __typename - ...a + ..._generated_onA_1_0 } } - fragment a on A { + fragment _generated_onA_1_0 on A { t { __typename ... on B { @@ -219,22 +219,23 @@ fn it_migrates_skip_include() { { t { __typename - ...b + ..._generated_onA_3_0 } } - fragment a on A { - x - y - } - - fragment b on A { + fragment _generated_onA_3_0 on A { x y t { __typename - ...a @include(if: $var) - ...a @skip(if: $var) + ... on A @include(if: $var) { + x + y + } + ... on A @skip(if: $var) { + x + y + } ... 
on A @custom { x y @@ -276,15 +277,15 @@ fn it_identifies_and_reuses_equivalent_fragments_that_arent_identical() { { t { __typename - ...a + ..._generated_onA_2_0 } t2 { __typename - ...a + ..._generated_onA_2_0 } } - fragment a on A { + fragment _generated_onA_2_0 on A { x y } @@ -324,20 +325,20 @@ fn fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment { t { __typename - ...a + ..._generated_onA_2_0 } t2 { __typename - ...b + ..._generated_onA_2_1 } } - fragment a on A { + fragment _generated_onA_2_0 on A { x y } - fragment b on A { + fragment _generated_onA_2_1 on A { y z } From 904faa5d210f5435b47975becb2a6895b0947d4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Thu, 15 Aug 2024 18:54:01 +0200 Subject: [PATCH 061/108] chore(federation): coerce hardcoded GraphQL values to match JS (#5819) --- apollo-federation/src/compat.rs | 126 +++++++++++++++++++++++++ apollo-federation/src/operation/mod.rs | 2 + 2 files changed, 128 insertions(+) diff --git a/apollo-federation/src/compat.rs b/apollo-federation/src/compat.rs index 4af8e90010..701337714c 100644 --- a/apollo-federation/src/compat.rs +++ b/apollo-federation/src/compat.rs @@ -9,10 +9,13 @@ use apollo_compiler::ast::Value; use apollo_compiler::collections::IndexMap; +use apollo_compiler::executable; use apollo_compiler::schema::Directive; use apollo_compiler::schema::ExtendedType; use apollo_compiler::schema::InputValueDefinition; use apollo_compiler::schema::Type; +use apollo_compiler::validation::Valid; +use apollo_compiler::ExecutableDocument; use apollo_compiler::Name; use apollo_compiler::Node; use apollo_compiler::Schema; @@ -281,6 +284,86 @@ pub fn coerce_schema_default_values(schema: &mut Schema) { } } +fn coerce_directive_application_values( + schema: &Valid, + directives: &mut executable::DirectiveList, +) { + for directive in directives { + let Some(definition) = schema.directive_definitions.get(&directive.name) else { + continue; + }; + let directive = 
directive.make_mut(); + for arg in &mut directive.arguments { + let Some(definition) = definition.argument_by_name(&arg.name) else { + continue; + }; + let arg = arg.make_mut(); + _ = coerce_value(&schema.types, &mut arg.value, &definition.ty); + } + } +} + +fn coerce_selection_set_values( + schema: &Valid, + selection_set: &mut executable::SelectionSet, +) { + for selection in &mut selection_set.selections { + match selection { + executable::Selection::Field(field) => { + let definition = field.definition.clone(); // Clone so we can mutate `field`. + let field = field.make_mut(); + for arg in &mut field.arguments { + let Some(definition) = definition.argument_by_name(&arg.name) else { + continue; + }; + let arg = arg.make_mut(); + _ = coerce_value(&schema.types, &mut arg.value, &definition.ty); + } + coerce_directive_application_values(schema, &mut field.directives); + coerce_selection_set_values(schema, &mut field.selection_set); + } + executable::Selection::FragmentSpread(frag) => { + let frag = frag.make_mut(); + coerce_directive_application_values(schema, &mut frag.directives); + } + executable::Selection::InlineFragment(frag) => { + let frag = frag.make_mut(); + coerce_directive_application_values(schema, &mut frag.directives); + coerce_selection_set_values(schema, &mut frag.selection_set); + } + } + } +} + +fn coerce_operation_values(schema: &Valid, operation: &mut Node) { + let operation = operation.make_mut(); + + for variable in &mut operation.variables { + let variable = variable.make_mut(); + let Some(default_value) = &mut variable.default_value else { + continue; + }; + + // On error, the default value is invalid. This would have been caught by validation. + // In schemas, we explicitly remove the default value if it's invalid, to match the JS + // query planner behaviour. 
+ // In queries, I hope we can just reject queries with invalid default values instead of + // silently doing the wrong thing :) + _ = coerce_value(&schema.types, default_value, &variable.ty); + } + + coerce_selection_set_values(schema, &mut operation.selection_set); +} + +pub fn coerce_executable_values(schema: &Valid, document: &mut ExecutableDocument) { + if let Some(operation) = &mut document.operations.anonymous { + coerce_operation_values(schema, operation); + } + for operation in document.operations.named.values_mut() { + coerce_operation_values(schema, operation); + } +} + /// Applies default value coercion and removes non-semantic directives so that /// the apollo-rs serialized output of the schema matches the result of /// `printSchema(buildSchema()` in graphql-js. @@ -288,3 +371,46 @@ pub fn make_print_schema_compatible(schema: &mut Schema) { remove_non_semantic_directives(schema); coerce_schema_default_values(schema); } + +#[cfg(test)] +mod tests { + use apollo_compiler::validation::Valid; + use apollo_compiler::ExecutableDocument; + use apollo_compiler::Schema; + + use super::coerce_executable_values; + + fn parse_and_coerce(schema: &Valid, input: &str) -> String { + let mut document = ExecutableDocument::parse(schema, input, "test.graphql").unwrap(); + coerce_executable_values(schema, &mut document); + document.to_string() + } + + #[test] + fn coerces_list_values() { + let schema = Schema::parse_and_validate( + r#" + type Query { + test( + bools: [Boolean], + ints: [Int], + strings: [String], + floats: [Float], + ): Int + } + "#, + "schema.graphql", + ) + .unwrap(); + + insta::assert_snapshot!(parse_and_coerce(&schema, r#" + { + test(bools: true, ints: 1, strings: "string", floats: 2.0) + } + "#), @r#" + { + test(bools: [true], ints: [1], strings: ["string"], floats: [2.0]) + } + "#); + } +} diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index 66c4bc4654..5739e48bfe 100644 --- 
a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -32,6 +32,7 @@ use apollo_compiler::Name; use apollo_compiler::Node; use serde::Serialize; +use crate::compat::coerce_executable_values; use crate::error::FederationError; use crate::error::SingleFederationError; use crate::error::SingleFederationError::Internal; @@ -3857,6 +3858,7 @@ impl TryFrom for Valid { let mut document = executable::ExecutableDocument::new(); document.fragments = fragments; document.operations.insert(operation); + coerce_executable_values(value.schema.schema(), &mut document); Ok(document.validate(value.schema.schema())?) } } From ada39207348e43a0566ecbbe327c1353b77caf95 Mon Sep 17 00:00:00 2001 From: "Sachin D. Shinde" Date: Thu, 15 Aug 2024 18:25:57 -0700 Subject: [PATCH 062/108] fix(federation): change `HashMap`/`HashSet` usage to `IndexMap`/`IndexSet` to avoid non-determinism (#5827) --- .../src/link/cost_spec_definition.rs | 7 +- apollo-federation/src/link/database.rs | 10 +-- apollo-federation/src/link/mod.rs | 10 +-- apollo-federation/src/merge.rs | 5 +- apollo-federation/src/operation/contains.rs | 5 +- apollo-federation/src/operation/mod.rs | 43 ++++++------ apollo-federation/src/operation/optimize.rs | 35 ++++++---- .../extract_subgraphs_from_supergraph.rs | 21 +++--- .../src/query_plan/fetch_dependency_graph.rs | 16 ++--- .../fetch_dependency_graph_processor.rs | 6 +- .../src/query_plan/query_planner.rs | 3 +- .../src/sources/connect/url_path_template.rs | 3 +- .../query_plan/build_query_plan_support.rs | 6 +- .../build_query_plan_tests/requires.rs | 70 +++++++++---------- 14 files changed, 117 insertions(+), 123 deletions(-) diff --git a/apollo-federation/src/link/cost_spec_definition.rs b/apollo-federation/src/link/cost_spec_definition.rs index b3c2b09615..db49185b04 100644 --- a/apollo-federation/src/link/cost_spec_definition.rs +++ b/apollo-federation/src/link/cost_spec_definition.rs @@ -1,7 +1,6 @@ -use std::collections::HashMap; - use 
apollo_compiler::ast::Argument; use apollo_compiler::ast::Directive; +use apollo_compiler::collections::IndexMap; use apollo_compiler::name; use apollo_compiler::schema::Component; use apollo_compiler::schema::EnumType; @@ -41,7 +40,7 @@ macro_rules! propagate_demand_control_directives { subgraph_schema: &FederationSchema, source: &$directives_ty, dest: &mut $directives_ty, - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); let cost_directive = cost_directive_name.and_then(|name| source.get(name.as_str())); @@ -75,7 +74,7 @@ macro_rules! propagate_demand_control_directives_to_position { subgraph_schema: &mut FederationSchema, source: &Node<$source_ty>, dest: &$dest_ty, - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); let cost_directive = diff --git a/apollo-federation/src/link/database.rs b/apollo-federation/src/link/database.rs index d96c8ecc4b..94ea7ba0ff 100644 --- a/apollo-federation/src/link/database.rs +++ b/apollo-federation/src/link/database.rs @@ -1,9 +1,9 @@ use std::borrow::Cow; -use std::collections::HashMap; use std::sync::Arc; use apollo_compiler::ast::Directive; use apollo_compiler::ast::DirectiveLocation; +use apollo_compiler::collections::IndexMap; use apollo_compiler::schema::DirectiveDefinition; use apollo_compiler::ty; use apollo_compiler::Schema; @@ -46,10 +46,10 @@ pub fn links_metadata(schema: &Schema) -> Result, LinkErro // all of the @link usages (starting with the bootstrapping one) and extract their metadata. 
let link_name_in_schema = &bootstrap_directive.name; let mut links = Vec::new(); - let mut by_identity = HashMap::new(); - let mut by_name_in_schema = HashMap::new(); - let mut types_by_imported_name = HashMap::new(); - let mut directives_by_imported_name = HashMap::new(); + let mut by_identity = IndexMap::default(); + let mut by_name_in_schema = IndexMap::default(); + let mut types_by_imported_name = IndexMap::default(); + let mut directives_by_imported_name = IndexMap::default(); let link_applications = schema .schema_definition .directives diff --git a/apollo-federation/src/link/mod.rs b/apollo-federation/src/link/mod.rs index 1fe442ab9f..96473e59db 100644 --- a/apollo-federation/src/link/mod.rs +++ b/apollo-federation/src/link/mod.rs @@ -1,10 +1,10 @@ -use std::collections::HashMap; use std::fmt; use std::str; use std::sync::Arc; use apollo_compiler::ast::Directive; use apollo_compiler::ast::Value; +use apollo_compiler::collections::IndexMap; use apollo_compiler::name; use apollo_compiler::schema::Component; use apollo_compiler::InvalidNameError; @@ -387,10 +387,10 @@ pub struct LinkedElement { #[derive(Default, Eq, PartialEq, Debug)] pub struct LinksMetadata { pub(crate) links: Vec>, - pub(crate) by_identity: HashMap>, - pub(crate) by_name_in_schema: HashMap>, - pub(crate) types_by_imported_name: HashMap, Arc)>, - pub(crate) directives_by_imported_name: HashMap, Arc)>, + pub(crate) by_identity: IndexMap>, + pub(crate) by_name_in_schema: IndexMap>, + pub(crate) types_by_imported_name: IndexMap, Arc)>, + pub(crate) directives_by_imported_name: IndexMap, Arc)>, } impl LinksMetadata { diff --git a/apollo-federation/src/merge.rs b/apollo-federation/src/merge.rs index aaa8af294b..5c5531c5ec 100644 --- a/apollo-federation/src/merge.rs +++ b/apollo-federation/src/merge.rs @@ -1,4 +1,3 @@ -use std::collections::HashSet; use std::fmt::Debug; use std::fmt::Formatter; use std::iter; @@ -1582,8 +1581,8 @@ fn add_core_feature_inaccessible(supergraph: &mut Schema) { // TODO 
use apollo_compiler::executable::FieldSet fn parse_keys<'a>( directives: impl Iterator> + Sized, -) -> HashSet<&'a str> { - HashSet::from_iter( +) -> IndexSet<&'a str> { + IndexSet::from_iter( directives .flat_map(|k| { let field_set = directive_string_arg_value(k, &name!("fields")).unwrap(); diff --git a/apollo-federation/src/operation/contains.rs b/apollo-federation/src/operation/contains.rs index d947a8faf2..9b306504a2 100644 --- a/apollo-federation/src/operation/contains.rs +++ b/apollo-federation/src/operation/contains.rs @@ -1,5 +1,4 @@ -use std::collections::HashMap; - +use apollo_compiler::collections::IndexMap; use apollo_compiler::executable; use apollo_compiler::Name; use apollo_compiler::Node; @@ -146,7 +145,7 @@ fn same_arguments( let right = right .iter() .map(|arg| (&arg.name, arg)) - .collect::>(); + .collect::>(); left.iter().all(|arg| { right diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index 5739e48bfe..a6ed20d3f6 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -13,8 +13,6 @@ //! [`Field`], and the selection type is [`FieldSelection`]. 
use std::borrow::Cow; -use std::collections::HashMap; -use std::collections::HashSet; use std::fmt::Display; use std::fmt::Formatter; use std::hash::Hash; @@ -103,7 +101,7 @@ pub struct Operation { pub(crate) struct NormalizedDefer { pub operation: Operation, pub has_defers: bool, - pub assigned_defer_labels: HashSet, + pub assigned_defer_labels: IndexSet, pub defer_conditions: IndexMap>, } @@ -152,7 +150,7 @@ impl Operation { NormalizedDefer { operation: self, has_defers: false, - assigned_defer_labels: HashSet::new(), + assigned_defer_labels: IndexSet::default(), defer_conditions: IndexMap::default(), } // TODO(@TylerBloom): Once defer is implement, the above statement needs to be replaced @@ -164,7 +162,7 @@ impl Operation { NormalizedDefer { operation: self, has_defers: false, - assigned_defer_labels: HashSet::new(), + assigned_defer_labels: IndexSet::default(), defer_conditions: IndexMap::default(), } } @@ -2652,7 +2650,8 @@ impl SelectionSet { return Ok(self.clone()); } - let mut at_current_level: HashMap = HashMap::new(); + let mut at_current_level: IndexMap = + IndexMap::default(); let mut remaining: Vec<&FieldToAlias> = Vec::new(); for alias in aliases { @@ -2929,7 +2928,7 @@ fn compute_aliases_for_non_merging_fields( alias_collector: &mut Vec, schema: &ValidFederationSchema, ) -> Result<(), FederationError> { - let mut seen_response_names: HashMap = HashMap::new(); + let mut seen_response_names: IndexMap = IndexMap::default(); // - `s.selections` must be fragment-spread-free. 
fn rebased_fields_in_set(s: &SelectionSetAtPath) -> impl Iterator + '_ { @@ -3061,7 +3060,7 @@ fn compute_aliases_for_non_merging_fields( Ok(()) } -fn gen_alias_name(base_name: &Name, unavailable_names: &HashMap) -> Name { +fn gen_alias_name(base_name: &Name, unavailable_names: &IndexMap) -> Name { let mut counter = 0usize; loop { if let Ok(name) = Name::try_from(format!("{base_name}__alias_{counter}")) { @@ -3419,7 +3418,7 @@ impl NamedFragments { // the outcome of `map_to_expanded_selection_sets`. let mut fragments_map: IndexMap = IndexMap::default(); for fragment in fragments.values() { - let mut fragment_usages = HashMap::new(); + let mut fragment_usages = IndexMap::default(); NamedFragments::collect_fragment_usages(&fragment.selection_set, &mut fragment_usages); let usages: Vec = fragment_usages.keys().cloned().collect::>(); fragments_map.insert( @@ -3431,7 +3430,7 @@ impl NamedFragments { ); } - let mut removed_fragments: HashSet = HashSet::new(); + let mut removed_fragments: IndexSet = IndexSet::default(); let mut mapped_fragments = NamedFragments::default(); while !fragments_map.is_empty() { // Note that graphQL specifies that named fragments cannot have cycles (https://spec.graphql.org/draft/#sec-Fragment-spreads-must-not-form-cycles) @@ -3449,7 +3448,7 @@ impl NamedFragments { // JS code has methods for // * add and throw exception if entry already there // * add_if_not_exists - // Rust HashMap exposes insert (that overwrites) and try_insert (that throws) + // Rust IndexMap exposes insert (that overwrites) and try_insert (that throws) mapped_fragments.insert(normalized); } else { removed_fragments.insert(name.clone()); @@ -3465,7 +3464,7 @@ impl NamedFragments { /// Just like our `SelectionSet::used_fragments`, but with apollo-compiler types fn collect_fragment_usages( selection_set: &executable::SelectionSet, - aggregator: &mut HashMap, + aggregator: &mut IndexMap, ) { selection_set.selections.iter().for_each(|s| match s { executable::Selection::Field(f) 
=> { @@ -3512,7 +3511,7 @@ impl NamedFragments { // Collect fragment usages from operation types. impl Selection { - fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { + fn collect_used_fragment_names(&self, aggregator: &mut IndexMap) { match self { Selection::Field(field_selection) => { if let Some(s) = &field_selection.selection_set { @@ -3533,28 +3532,28 @@ impl Selection { } impl SelectionSet { - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { + pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut IndexMap) { for s in self.selections.values() { s.collect_used_fragment_names(aggregator); } } - pub(crate) fn used_fragments(&self) -> HashMap { - let mut usages = HashMap::new(); + pub(crate) fn used_fragments(&self) -> IndexMap { + let mut usages = IndexMap::default(); self.collect_used_fragment_names(&mut usages); usages } } impl Fragment { - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { + pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut IndexMap) { self.selection_set.collect_used_fragment_names(aggregator) } } impl NamedFragments { /// Collect the usages of fragments that are used within the selection of other fragments. - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { + pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut IndexMap) { for fragment in self.fragments.values() { fragment .selection_set @@ -3566,7 +3565,7 @@ impl NamedFragments { // Collect used variables from operation types. pub(crate) struct VariableCollector<'s> { - variables: HashSet<&'s Name>, + variables: IndexSet<&'s Name>, } impl<'s> VariableCollector<'s> { @@ -3655,14 +3654,14 @@ impl<'s> VariableCollector<'s> { } /// Consume the collector and return the collected names. 
- pub(crate) fn into_inner(self) -> HashSet<&'s Name> { + pub(crate) fn into_inner(self) -> IndexSet<&'s Name> { self.variables } } impl Fragment { /// Returns the variable names that are used by this fragment. - pub(crate) fn used_variables(&self) -> HashSet<&'_ Name> { + pub(crate) fn used_variables(&self) -> IndexSet<&'_ Name> { let mut collector = VariableCollector::new(); collector.visit_directive_list(&self.directives); collector.visit_selection_set(&self.selection_set); @@ -3673,7 +3672,7 @@ impl Fragment { impl SelectionSet { /// Returns the variable names that are used by this selection set, including through fragment /// spreads. - pub(crate) fn used_variables(&self) -> HashSet<&'_ Name> { + pub(crate) fn used_variables(&self) -> IndexSet<&'_ Name> { let mut collector = VariableCollector::new(); collector.visit_selection_set(self); collector.into_inner() diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index de72f6d74c..de25d12746 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -35,10 +35,10 @@ //! ## `reuse_fragments` methods (putting everything together) //! Recursive optimization of selection and selection sets. -use std::collections::HashMap; -use std::collections::HashSet; use std::sync::Arc; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; use apollo_compiler::executable::VariableDefinition; use apollo_compiler::Name; @@ -67,7 +67,7 @@ use crate::schema::position::CompositeTypeDefinitionPosition; #[derive(Debug)] struct ReuseContext<'a> { fragments: &'a NamedFragments, - operation_variables: Option>, + operation_variables: Option>, } impl<'a> ReuseContext<'a> { @@ -392,7 +392,7 @@ impl NamedFragments { // `Option`. However, `None` validator makes it clearer that validation is // unnecessary. 
struct FieldsConflictValidator { - by_response_name: HashMap>>>, + by_response_name: IndexMap>>>, } impl FieldsConflictValidator { @@ -406,7 +406,8 @@ impl FieldsConflictValidator { fn for_level<'a>(level: &[&'a SelectionSet]) -> Self { // Group `level`'s fields by the response-name/field - let mut at_level: HashMap>> = HashMap::new(); + let mut at_level: IndexMap>> = + IndexMap::default(); for selection_set in level { for field_selection in selection_set.field_selections() { let response_name = field_selection.field.response_name(); @@ -421,10 +422,10 @@ impl FieldsConflictValidator { } // Collect validators per response-name/field - let mut by_response_name = HashMap::new(); + let mut by_response_name = IndexMap::default(); for (response_name, fields) in at_level { - let mut at_response_name: HashMap>> = - HashMap::new(); + let mut at_response_name: IndexMap>> = + IndexMap::default(); for (field, selection_sets) in fields { if selection_sets.is_empty() { at_response_name.insert(field, None); @@ -631,7 +632,7 @@ struct FragmentRestrictionAtType { #[derive(Default)] struct FragmentRestrictionAtTypeCache { - map: HashMap<(Name, CompositeTypeDefinitionPosition), Arc>, + map: IndexMap<(Name, CompositeTypeDefinitionPosition), Arc>, } impl FragmentRestrictionAtTypeCache { @@ -644,8 +645,8 @@ impl FragmentRestrictionAtTypeCache { // the lifetime does not really want to work out. // (&'cache mut self) -> Result<&'cache FragmentRestrictionAtType> match self.map.entry((fragment.name.clone(), ty.clone())) { - std::collections::hash_map::Entry::Occupied(entry) => Ok(Arc::clone(entry.get())), - std::collections::hash_map::Entry::Vacant(entry) => Ok(Arc::clone( + indexmap::map::Entry::Occupied(entry) => Ok(Arc::clone(entry.get())), + indexmap::map::Entry::Vacant(entry) => Ok(Arc::clone( entry.insert(Arc::new(fragment.expanded_selection_set_at_type(ty)?)), )), } @@ -866,7 +867,7 @@ impl SelectionSet { ) { // Note: It's not possible for two fragments to include each other. 
So, we don't need to // worry about inclusion cycles. - let included_fragments: HashSet = applicable_fragments + let included_fragments: IndexSet = applicable_fragments .iter() .filter(|(fragment, _)| { applicable_fragments @@ -1249,8 +1250,12 @@ impl NamedFragments { ) } - fn update_usages(usages: &mut HashMap, fragment: &Node, usage_count: u32) { - let mut inner_usages = HashMap::new(); + fn update_usages( + usages: &mut IndexMap, + fragment: &Node, + usage_count: u32, + ) { + let mut inner_usages = IndexMap::default(); fragment.collect_used_fragment_names(&mut inner_usages); for (name, inner_count) in inner_usages { @@ -1625,7 +1630,7 @@ fn fragment_name(mut index: usize) -> Name { struct FragmentGenerator { fragments: NamedFragments, // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! - names: HashMap<(String, usize), usize>, + names: IndexMap<(String, usize), usize>, } impl FragmentGenerator { diff --git a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs index c714cc21b6..611dfaa309 100644 --- a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs +++ b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs @@ -1,5 +1,4 @@ use std::collections::BTreeMap; -use std::collections::HashMap; use std::fmt; use std::fmt::Write; use std::ops::Deref; @@ -297,7 +296,7 @@ pub(crate) fn new_empty_fed_2_subgraph_schema() -> Result + // IndexMap subgraph_info: IndexMap, } @@ -329,8 +328,8 @@ struct TypeInfos { /// when a custom directive's name conflicts with that of a default one. 
fn get_apollo_directive_names( supergraph_schema: &FederationSchema, -) -> Result, FederationError> { - let mut hm: HashMap = HashMap::new(); +) -> Result, FederationError> { + let mut hm: IndexMap = IndexMap::default(); for directive in &supergraph_schema.schema().schema_definition.directives { if directive.name.as_str() == "link" { if let Ok(link) = Link::from_directive_application(directive) { @@ -486,7 +485,7 @@ fn add_all_empty_subgraph_types( federation_spec_definitions: &IndexMap, join_spec_definition: &'static JoinSpecDefinition, filtered_types: &Vec, - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result { let type_directive_definition = join_spec_definition.type_directive_definition(supergraph_schema)?; @@ -788,7 +787,7 @@ fn extract_object_type_content( federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -964,7 +963,7 @@ fn extract_interface_type_content( federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -1252,7 +1251,7 @@ fn extract_enum_type_content( federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { // This was added in join 0.3, so it can genuinely be None. 
let enum_value_directive_definition = @@ -1361,7 +1360,7 @@ fn extract_input_object_type_content( federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -1468,7 +1467,7 @@ fn add_subgraph_field( is_shareable: bool, field_directive_application: Option<&FieldDirectiveArguments>, cost_spec_definition: Option<&'static CostSpecDefinition>, - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_application = field_directive_application.unwrap_or_else(|| &FieldDirectiveArguments { @@ -1583,7 +1582,7 @@ fn add_subgraph_input_field( subgraph: &mut FederationSubgraph, field_directive_application: Option<&FieldDirectiveArguments>, cost_spec_definition: Option<&'static CostSpecDefinition>, - original_directive_names: &HashMap, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_application = field_directive_application.unwrap_or_else(|| &FieldDirectiveArguments { diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index 34a86e6ae9..d14e57bad1 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -1,5 +1,3 @@ -use std::collections::HashMap; -use std::collections::HashSet; use std::fmt::Write as _; use std::iter; use std::ops::Deref; @@ -928,7 +926,7 @@ impl FetchDependencyGraph { /// edges. In RS implementation we first collect the edges and then remove them. This has a side /// effect that if we ever end up with a cycle in a graph (which is an invalid state), this method /// may result in infinite loop. 
- fn collect_redundant_edges(&self, node_index: NodeIndex, acc: &mut HashSet) { + fn collect_redundant_edges(&self, node_index: NodeIndex, acc: &mut IndexSet) { let mut stack = vec![]; for start_index in self.children_of(node_index) { stack.extend(self.children_of(start_index)); @@ -946,7 +944,7 @@ impl FetchDependencyGraph { /// If any deeply nested child of this node has an edge to any direct child of this node, the /// direct child is removed, as we know it is also reachable through the deeply nested route. fn remove_redundant_edges(&mut self, node_index: NodeIndex) { - let mut redundant_edges = HashSet::new(); + let mut redundant_edges = IndexSet::default(); self.collect_redundant_edges(node_index, &mut redundant_edges); if !redundant_edges.is_empty() { @@ -1005,7 +1003,7 @@ impl FetchDependencyGraph { // Two phases for mutability reasons: first all redundant edges coming out of all nodes are // collected and then they are all removed. - let mut redundant_edges = HashSet::new(); + let mut redundant_edges = IndexSet::default(); for node_index in self.graph.node_indices() { self.collect_redundant_edges(node_index, &mut redundant_edges); } @@ -1063,7 +1061,7 @@ impl FetchDependencyGraph { node.selection_set.selection_set.selections.is_empty() && !self.is_root_node(node_index, node) }; - let to_remove: HashSet = self + let to_remove: IndexSet = self .graph .node_references() .filter_map(|(node_index, node)| is_removable(node_index, node).then_some(node_index)) @@ -1302,7 +1300,7 @@ impl FetchDependencyGraph { .any(|input| input.contains(selection))); }; - let impl_type_names: HashSet<_> = self + let impl_type_names: IndexSet<_> = self .supergraph_schema .possible_runtime_types(condition_in_supergraph.clone().into())? 
.iter() @@ -1769,7 +1767,7 @@ impl FetchDependencyGraph { let handled_defers_in_current = defers_in_current .iter() .map(|info| info.label.clone()) - .collect::>(); + .collect::>(); let unhandled_defer_nodes = all_deferred_nodes .keys() .filter(|label| !handled_defers_in_current.contains(*label)) @@ -2106,7 +2104,7 @@ impl FetchDependencyGraph { merged_id: NodeIndex, path_in_this: &OpPath, ) { - let mut new_parent_relations = HashMap::new(); + let mut new_parent_relations = IndexMap::default(); for child_id in self.children_of(merged_id) { // This could already be a child of `this`. Typically, we can have case where we have: // 1 diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs index e15134e4ab..75f945e2b2 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs @@ -1,6 +1,6 @@ -use std::collections::HashSet; use std::sync::Arc; +use apollo_compiler::collections::IndexSet; use apollo_compiler::executable::DirectiveList; use apollo_compiler::executable::VariableDefinition; use apollo_compiler::Name; @@ -50,7 +50,7 @@ pub(crate) struct FetchDependencyGraphToQueryPlanProcessor { operation_directives: Arc, operation_compression: SubgraphOperationCompression, operation_name: Option, - assigned_defer_labels: Option>, + assigned_defer_labels: Option>, counter: u32, } @@ -248,7 +248,7 @@ impl FetchDependencyGraphToQueryPlanProcessor { operation_directives: Arc, operation_compression: SubgraphOperationCompression, operation_name: Option, - assigned_defer_labels: Option>, + assigned_defer_labels: Option>, ) -> Self { Self { variable_definitions, diff --git a/apollo-federation/src/query_plan/query_planner.rs b/apollo-federation/src/query_plan/query_planner.rs index 9e9f87c149..84f392340e 100644 --- a/apollo-federation/src/query_plan/query_planner.rs +++ 
b/apollo-federation/src/query_plan/query_planner.rs @@ -2,7 +2,6 @@ use std::cell::Cell; use std::num::NonZeroU32; use std::sync::Arc; -use apollo_compiler::collections::HashMap; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; use apollo_compiler::validation::Valid; @@ -778,7 +777,7 @@ fn compute_plan_for_defer_conditionals( pub(crate) struct RebasedFragments { original_fragments: NamedFragments, /// Map key: subgraph name - rebased_fragments: HashMap, NamedFragments>, + rebased_fragments: IndexMap, NamedFragments>, } impl RebasedFragments { diff --git a/apollo-federation/src/sources/connect/url_path_template.rs b/apollo-federation/src/sources/connect/url_path_template.rs index dda02f89cb..83977d3d45 100644 --- a/apollo-federation/src/sources/connect/url_path_template.rs +++ b/apollo-federation/src/sources/connect/url_path_template.rs @@ -1,4 +1,3 @@ -use std::collections::HashSet; use std::fmt::Display; use apollo_compiler::collections::IndexMap; @@ -188,7 +187,7 @@ impl URLPathTemplate { } pub fn required_parameters(&self) -> Vec { - let mut parameters = HashSet::new(); + let mut parameters = IndexSet::default(); for param_value in &self.path { parameters.extend(param_value.required_parameters()); } diff --git a/apollo-federation/tests/query_plan/build_query_plan_support.rs b/apollo-federation/tests/query_plan/build_query_plan_support.rs index 8f594ef271..ed70798f2f 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_support.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_support.rs @@ -1,8 +1,8 @@ -use std::collections::HashSet; use std::io::Read; use std::sync::Mutex; use std::sync::OnceLock; +use apollo_compiler::collections::IndexSet; use apollo_federation::query_plan::query_planner::QueryPlanner; use apollo_federation::query_plan::query_planner::QueryPlannerConfig; use apollo_federation::query_plan::FetchNode; @@ -96,7 +96,7 @@ pub(crate) fn compose( function_path: &'static str, 
subgraph_names_and_schemas: &[(&str, &str)], ) -> String { - let unique_names: std::collections::HashSet<_> = subgraph_names_and_schemas + let unique_names: IndexSet<_> = subgraph_names_and_schemas .iter() .map(|(name, _)| name) .collect(); @@ -127,7 +127,7 @@ pub(crate) fn compose( let prefix = "# Composed from subgraphs with hash: "; let test_name = function_path.rsplit("::").next().unwrap(); - static SEEN_TEST_NAMES: OnceLock>> = OnceLock::new(); + static SEEN_TEST_NAMES: OnceLock>> = OnceLock::new(); let new = SEEN_TEST_NAMES .get_or_init(Default::default) .lock() diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs index b218969a35..1fc7845951 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs @@ -877,8 +877,6 @@ fn it_handles_longer_require_chain() { } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure fn it_handles_complex_require_chain() { // Another "require chain" test but with more complexity as we have a require on multiple fields, some of which being // nested, and having requirements of their own. @@ -994,40 +992,6 @@ fn it_handles_complex_require_chain() { } }, Parallel { - Sequence { - Flatten(path: "t") { - Fetch(service: "Subgraph2") { - { - ... on T { - __typename - id - } - } => - { - ... on T { - inner2_required - inner1 - } - } - }, - }, - Flatten(path: "t") { - Fetch(service: "Subgraph3") { - { - ... on T { - __typename - inner2_required - id - } - } => - { - ... on T { - inner2 - } - } - }, - }, - }, Flatten(path: "t") { Fetch(service: "Subgraph7") { { @@ -1129,6 +1093,40 @@ fn it_handles_complex_require_chain() { }, }, }, + Sequence { + Flatten(path: "t") { + Fetch(service: "Subgraph2") { + { + ... on T { + __typename + id + } + } => + { + ... 
on T { + inner2_required + inner1 + } + } + }, + }, + Flatten(path: "t") { + Fetch(service: "Subgraph3") { + { + ... on T { + __typename + inner2_required + id + } + } => + { + ... on T { + inner2 + } + } + }, + }, + }, }, Flatten(path: "t") { Fetch(service: "Subgraph5") { From 031b35024796876021f139a6dfe25c78ae4602b4 Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Fri, 16 Aug 2024 00:24:33 -0700 Subject: [PATCH 063/108] chore(federation): fixed the error formating of "excessive number of combinations" (#5831) --- apollo-federation/src/query_graph/graph_path.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/apollo-federation/src/query_graph/graph_path.rs b/apollo-federation/src/query_graph/graph_path.rs index 02e40205ee..3c75c66b78 100644 --- a/apollo-federation/src/query_graph/graph_path.rs +++ b/apollo-federation/src/query_graph/graph_path.rs @@ -3178,9 +3178,9 @@ impl SimultaneousPaths { product.saturating_mul(options.len()) }); if num_options > 1_000_000 { - return Err(FederationError::internal( - "flat_cartesian_product: excessive number of combinations: {num_options}", - )); + return Err(FederationError::internal(format!( + "flat_cartesian_product: excessive number of combinations: {num_options}" + ))); } let mut product = Vec::with_capacity(num_options); From 33891e5bba4947bcda41134ab3a6a9513ff70c01 Mon Sep 17 00:00:00 2001 From: Dariusz Kuc <9501705+dariuszkuc@users.noreply.github.com> Date: Fri, 16 Aug 2024 02:26:02 -0500 Subject: [PATCH 064/108] fix(federation): use supergraph when parsing requires fieldset (#5823) When parsing `@requires` field set selection we need to use supergraph schema instead of a target subgraph schema. 
--- .../src/query_graph/build_query_graph.rs | 3 +- .../extract_subgraphs_from_supergraph.rs | 18 ++- .../build_query_plan_tests/requires.rs | 103 ++++++++++++++++++ .../handles_requires_from_supergraph.graphql | 74 +++++++++++++ 4 files changed, 192 insertions(+), 6 deletions(-) create mode 100644 apollo-federation/tests/query_plan/supergraphs/handles_requires_from_supergraph.graphql diff --git a/apollo-federation/src/query_graph/build_query_graph.rs b/apollo-federation/src/query_graph/build_query_graph.rs index 38b2232dcc..8aca65e9e0 100644 --- a/apollo-federation/src/query_graph/build_query_graph.rs +++ b/apollo-federation/src/query_graph/build_query_graph.rs @@ -1339,8 +1339,9 @@ impl FederatedQueryGraphBuilder { let application = subgraph_data .federation_spec_definition .requires_directive_arguments(directive)?; + // @requires field set is validated against the supergraph let conditions = parse_field_set( - schema, + &self.supergraph_schema, field_definition_position.parent().type_name().clone(), application.fields, )?; diff --git a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs index 611dfaa309..5fc428e38b 100644 --- a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs +++ b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs @@ -464,7 +464,10 @@ fn extract_subgraphs_from_fed_2_supergraph( }) .collect::>(); for subgraph in subgraphs.subgraphs.values_mut() { - remove_inactive_requires_and_provides_from_subgraph(&mut subgraph.schema)?; + remove_inactive_requires_and_provides_from_subgraph( + supergraph_schema, + &mut subgraph.schema, + )?; remove_unused_types_from_subgraph(&mut subgraph.schema)?; for definition in all_executable_directive_definitions.iter() { let pos = DirectiveDefinitionPosition { @@ -1995,6 +1998,7 @@ fn add_federation_operations( /// unnecessarily. 
Besides, if a usage adds something useless, there is a chance it hasn't fully /// understood something, and warning about that fact through an error is more helpful. fn remove_inactive_requires_and_provides_from_subgraph( + supergraph_schema: &FederationSchema, schema: &mut FederationSchema, ) -> Result<(), FederationError> { let federation_spec_definition = get_federation_spec_definition_from_subgraph(schema)?; @@ -2046,6 +2050,7 @@ fn remove_inactive_requires_and_provides_from_subgraph( for pos in object_or_interface_field_definition_positions { remove_inactive_applications( + supergraph_schema, schema, federation_spec_definition, FieldSetDirectiveKind::Requires, @@ -2053,6 +2058,7 @@ fn remove_inactive_requires_and_provides_from_subgraph( pos.clone(), )?; remove_inactive_applications( + supergraph_schema, schema, federation_spec_definition, FieldSetDirectiveKind::Provides, @@ -2070,6 +2076,7 @@ enum FieldSetDirectiveKind { } fn remove_inactive_applications( + supergraph_schema: &FederationSchema, schema: &mut FederationSchema, federation_spec_definition: &'static FederationSpecDefinition, directive_kind: FieldSetDirectiveKind, @@ -2079,7 +2086,7 @@ fn remove_inactive_applications( let mut replacement_directives = Vec::new(); let field = object_or_interface_field_definition_position.get(schema.schema())?; for directive in field.directives.get_all(name_in_schema) { - let (fields, parent_type_pos) = match directive_kind { + let (fields, parent_type_pos, target_schema) = match directive_kind { FieldSetDirectiveKind::Provides => { let fields = federation_spec_definition .provides_directive_arguments(directive)? @@ -2087,7 +2094,7 @@ fn remove_inactive_applications( let parent_type_pos: CompositeTypeDefinitionPosition = schema .get_type(field.ty.inner_named_type().clone())? 
.try_into()?; - (fields, parent_type_pos) + (fields, parent_type_pos, schema.schema()) } FieldSetDirectiveKind::Requires => { let fields = federation_spec_definition @@ -2098,7 +2105,8 @@ fn remove_inactive_applications( .parent() .clone() .into(); - (fields, parent_type_pos) + // @requires needs to be validated against the supergraph schema + (fields, parent_type_pos, supergraph_schema.schema()) } }; // TODO: The assume_valid_ref() here is non-ideal, in the sense that the error messages we @@ -2108,7 +2116,7 @@ fn remove_inactive_applications( // At best, we could try to shift this computation to after the subgraph schema validation // step, but its unclear at this time whether performing this shift affects correctness (and // it takes time to determine that). So for now, we keep this here. - let valid_schema = Valid::assume_valid_ref(schema.schema()); + let valid_schema = Valid::assume_valid_ref(target_schema); // TODO: In the JS codebase, this function ends up getting additionally used in the schema // upgrader, where parsing the field set may error. In such cases, we end up skipping those // directives instead of returning error here, as it pollutes the list of error messages diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs index 1fc7845951..bcaeac067d 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs @@ -1736,3 +1736,106 @@ fn it_handles_multiple_requires_with_multiple_fetches() { "### ); } + +#[test] +fn handles_requires_from_supergraph() { + // This test verifies that @requires field selection set does not have to be locally satisfiable + // and is valid as long as it is satisfiable in the supergraph. 
+ // In the test below, type U implements interface I only in the Subgraph1, but we can still use + // that type information in the @requires selection set in Subgraph2. + // + // NOTE: While GraphQL does not allow you to return raw interface data, it is still a valid schema. + // Since our interface field is marked as @external, its value should always be provided from + // other subgraph and should not be resolved locally (as that would lead to a runtime exception + // as we don't have any concrete type to return there). + let planner = planner!( + Subgraph1: r#" + type Query { + t: T + } + + type T @key(fields: "id") { + id: ID! + i: I + } + + interface I { + name: String + } + + type U implements I { + name: String @shareable + value: String + } + "#, + Subgraph2: r#" + interface I { + name: String + } + + type U { + name: String @shareable + value: String @external + } + + type T @key(fields: "id") { + id: ID! + i: I @external + r: Int @requires(fields: "i { name ... on U { value } }") + } + "#, + ); + assert_plan!( + &planner, + r#" + { + t { + r + } + } + "#, + + @r###" + QueryPlan { + Sequence { + Fetch(service: "Subgraph1") { + { + t { + __typename + id + i { + __typename + name + ... on U { + value + } + } + } + } + }, + Flatten(path: "t") { + Fetch(service: "Subgraph2") { + { + ... on T { + __typename + id + i { + name + ... on U { + value + } + } + } + } => + { + ... 
on T { + r + } + } + }, + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/supergraphs/handles_requires_from_supergraph.graphql b/apollo-federation/tests/query_plan/supergraphs/handles_requires_from_supergraph.graphql new file mode 100644 index 0000000000..a3f47c437f --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/handles_requires_from_supergraph.graphql @@ -0,0 +1,74 @@ +# Composed from subgraphs with hash: 46a2d6c6cf9956c08daa5b3faa018245cb5f9cfe +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +interface I + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + name: String +} + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") + SUBGRAPH2 @join__graph(name: "Subgraph2", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. 
+ """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + t: T @join__field(graph: SUBGRAPH1) +} + +type T + @join__type(graph: SUBGRAPH1, key: "id") + @join__type(graph: SUBGRAPH2, key: "id") +{ + id: ID! + i: I @join__field(graph: SUBGRAPH1) @join__field(graph: SUBGRAPH2, external: true) + r: Int @join__field(graph: SUBGRAPH2, requires: "i { name ... on U { value } }") +} + +type U implements I + @join__implements(graph: SUBGRAPH1, interface: "I") + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + name: String + value: String @join__field(graph: SUBGRAPH1) @join__field(graph: SUBGRAPH2, external: true) +} From c141a07695a8b70f5e4db74ce931d01f29ca0958 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Fri, 16 Aug 2024 09:46:25 +0100 Subject: [PATCH 065/108] Fix session counting and the reporting of file handle shortage Session counting incorrectly included connections to the health check or other non-graphql connections. This is now corrected so that only connections to the main graphql port are counted. Warnings about file handle shortages are now handled correctly as a global resource. 
--- .../axum_factory/axum_http_server_factory.rs | 2 + apollo-router/src/axum_factory/listeners.rs | 78 ++++++++++++------- 2 files changed, 52 insertions(+), 28 deletions(-) diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs index f687440f4c..08df933dc6 100644 --- a/apollo-router/src/axum_factory/axum_http_server_factory.rs +++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs @@ -303,6 +303,7 @@ impl HttpServerFactory for AxumHttpServerFactory { main_listener, actual_main_listen_address.clone(), all_routers.main.1, + true, all_connections_stopped_sender.clone(), ); @@ -341,6 +342,7 @@ impl HttpServerFactory for AxumHttpServerFactory { listener, listen_addr.clone(), router, + false, all_connections_stopped_sender.clone(), ); ( diff --git a/apollo-router/src/axum_factory/listeners.rs b/apollo-router/src/axum_factory/listeners.rs index 6be06acdbd..01796217d1 100644 --- a/apollo-router/src/axum_factory/listeners.rs +++ b/apollo-router/src/axum_factory/listeners.rs @@ -15,6 +15,7 @@ use futures::channel::oneshot; use futures::prelude::*; use hyper::server::conn::Http; use multimap::MultiMap; +use parking_lot::Mutex; #[cfg(unix)] use tokio::net::UnixListener; use tokio::sync::mpsc; @@ -31,7 +32,8 @@ use crate::router::ApolloRouterError; use crate::router_factory::Endpoint; use crate::ListenAddr; -pub(crate) static SESSION_COUNT: AtomicU64 = AtomicU64::new(0); +static MAX_OPEN_FILE_WARNING: Mutex> = Mutex::new(None); +static SESSION_COUNT: AtomicU64 = AtomicU64::new(0); #[derive(Clone, Debug)] pub(crate) struct ListenAddrAndRouter(pub(crate) ListenAddr, pub(crate) Router); @@ -197,10 +199,34 @@ pub(super) async fn get_extra_listeners( Ok(listeners_and_routers) } +async fn check_open_files() { + { + let mut max_open_file_warning = MAX_OPEN_FILE_WARNING.lock(); + match *max_open_file_warning { + None => { + tracing::error!( + "reached the max open file limit, cannot accept any new 
connection" + ); + *max_open_file_warning = Some(Instant::now()); + } + Some(last) => { + if Instant::now() - last > Duration::from_secs(60) { + tracing::error!( + "still at the max open file limit, cannot accept any new connection" + ); + *max_open_file_warning = Some(Instant::now()); + } + } + } + } + tokio::time::sleep(Duration::from_millis(1)).await; +} + pub(super) fn serve_router_on_listen_addr( mut listener: Listener, address: ListenAddr, router: axum::Router, + main_graphql_port: bool, all_connections_stopped_sender: mpsc::Sender<()>, ) -> (impl Future, oneshot::Sender<()>) { let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); @@ -213,7 +239,6 @@ pub(super) fn serve_router_on_listen_addr( tokio::pin!(shutdown_receiver); let connection_shutdown = Arc::new(Notify::new()); - let mut max_open_file_warning = None; let address = address.to_string(); @@ -229,16 +254,21 @@ pub(super) fn serve_router_on_listen_addr( match res { Ok(res) => { - if max_open_file_warning.is_some(){ - tracing::info!("can accept connections again"); - max_open_file_warning = None; + { + let mut max_open_file_warning = MAX_OPEN_FILE_WARNING.lock(); + if max_open_file_warning.is_some() { + tracing::info!("can accept connections again"); + *max_open_file_warning = None; + } + } + // We only want to recognise sessions if we are the main graphql port. 
+ if main_graphql_port { + let session_count = SESSION_COUNT.fetch_add(1, Ordering::Acquire)+1; + tracing::info!( + value.apollo_router_session_count_total = session_count, + listener = &address + ); } - - let session_count = SESSION_COUNT.fetch_add(1, Ordering::Acquire)+1; - tracing::info!( - value.apollo_router_session_count_total = session_count, - listener = &address - ); let address = address.clone(); tokio::task::spawn(async move { @@ -356,12 +386,14 @@ pub(super) fn serve_router_on_listen_addr( } } - let session_count = SESSION_COUNT.fetch_sub(1, Ordering::Acquire)-1; - tracing::info!( - value.apollo_router_session_count_total = session_count, - listener = &address - ); - + // We only want to recognise sessions if we are the main graphql port. + if main_graphql_port { + let session_count = SESSION_COUNT.fetch_sub(1, Ordering::Acquire)-1; + tracing::info!( + value.apollo_router_session_count_total = session_count, + listener = &address + ); + } }); } @@ -419,17 +451,7 @@ pub(super) fn serve_router_on_listen_addr( _ => { match e.raw_os_error() { Some(libc::EMFILE) | Some(libc::ENFILE) => { - match max_open_file_warning { - None => { - tracing::error!("reached the max open file limit, cannot accept any new connection"); - max_open_file_warning = Some(Instant::now()); - } - Some(last) => if Instant::now() - last > Duration::from_secs(60) { - tracing::error!("still at the max open file limit, cannot accept any new connection"); - max_open_file_warning = Some(Instant::now()); - } - } - tokio::time::sleep(Duration::from_millis(1)).await; + check_open_files().await; } _ => {} } From 6d20709b5eadca8687a5101ec7d42f6183cd09e3 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Fri, 16 Aug 2024 09:58:58 +0100 Subject: [PATCH 066/108] add a changeset --- .../fix_garypen_fix_sessions_and_handle_reporting.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changesets/fix_garypen_fix_sessions_and_handle_reporting.md diff --git 
a/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md b/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md
new file mode 100644
index 0000000000..f9527320e6
--- /dev/null
+++ b/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md
@@ -0,0 +1,7 @@
+### Fix session counting and the reporting of file handle shortage ([PR #5834](https://github.com/apollographql/router/pull/5834))
+
+Session counting incorrectly included connections to the health check or other non-graphql connections. This is now corrected so that only connections to the main graphql port are counted.
+
+Warnings about file handle shortages are now handled correctly as a global resource.
+
+By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5834
\ No newline at end of file

From 853b70c6db165c90aad52bf1bd2f185d632b988f Mon Sep 17 00:00:00 2001
From: Gary Pennington
Date: Fri, 16 Aug 2024 12:19:21 +0100
Subject: [PATCH 067/108] Remove custom log rate limiting from listener

The listener had its own custom rate limiting for log messages. The router
comes with a standard mechanism for rate limiting log messages.

Remove the custom rate limiting mechanism and advise in the changelog that
standard rate limiting configuration should be used.
--- apollo-router/src/axum_factory/listeners.rs | 42 +++++---------------- 1 file changed, 9 insertions(+), 33 deletions(-) diff --git a/apollo-router/src/axum_factory/listeners.rs b/apollo-router/src/axum_factory/listeners.rs index 01796217d1..dad439317c 100644 --- a/apollo-router/src/axum_factory/listeners.rs +++ b/apollo-router/src/axum_factory/listeners.rs @@ -7,7 +7,6 @@ use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::Duration; -use std::time::Instant; use axum::response::*; use axum::Router; @@ -15,7 +14,6 @@ use futures::channel::oneshot; use futures::prelude::*; use hyper::server::conn::Http; use multimap::MultiMap; -use parking_lot::Mutex; #[cfg(unix)] use tokio::net::UnixListener; use tokio::sync::mpsc; @@ -32,8 +30,8 @@ use crate::router::ApolloRouterError; use crate::router_factory::Endpoint; use crate::ListenAddr; -static MAX_OPEN_FILE_WARNING: Mutex> = Mutex::new(None); static SESSION_COUNT: AtomicU64 = AtomicU64::new(0); +static MAX_FILE_HANDLES_WARN: AtomicBool = AtomicBool::new(false); #[derive(Clone, Debug)] pub(crate) struct ListenAddrAndRouter(pub(crate) ListenAddr, pub(crate) Router); @@ -199,29 +197,6 @@ pub(super) async fn get_extra_listeners( Ok(listeners_and_routers) } -async fn check_open_files() { - { - let mut max_open_file_warning = MAX_OPEN_FILE_WARNING.lock(); - match *max_open_file_warning { - None => { - tracing::error!( - "reached the max open file limit, cannot accept any new connection" - ); - *max_open_file_warning = Some(Instant::now()); - } - Some(last) => { - if Instant::now() - last > Duration::from_secs(60) { - tracing::error!( - "still at the max open file limit, cannot accept any new connection" - ); - *max_open_file_warning = Some(Instant::now()); - } - } - } - } - tokio::time::sleep(Duration::from_millis(1)).await; -} - pub(super) fn serve_router_on_listen_addr( mut listener: Listener, address: ListenAddr, @@ -254,12 +229,9 @@ pub(super) fn 
serve_router_on_listen_addr( match res { Ok(res) => { - { - let mut max_open_file_warning = MAX_OPEN_FILE_WARNING.lock(); - if max_open_file_warning.is_some() { - tracing::info!("can accept connections again"); - *max_open_file_warning = None; - } + if MAX_FILE_HANDLES_WARN.load(Ordering::SeqCst) { + tracing::info!("can accept connections again"); + MAX_FILE_HANDLES_WARN.store(false, Ordering::SeqCst); } // We only want to recognise sessions if we are the main graphql port. if main_graphql_port { @@ -451,7 +423,11 @@ pub(super) fn serve_router_on_listen_addr( _ => { match e.raw_os_error() { Some(libc::EMFILE) | Some(libc::ENFILE) => { - check_open_files().await; + tracing::error!( + "reached the max open file limit, cannot accept any new connection" + ); + MAX_FILE_HANDLES_WARN.store(true, Ordering::SeqCst); + tokio::time::sleep(Duration::from_millis(1)).await; } _ => {} } From d6114d81aef2f94e6b1bb728fa44286cbd7058b1 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Fri, 16 Aug 2024 12:22:42 +0100 Subject: [PATCH 068/108] Update changeset message to reflect changes --- .changesets/fix_garypen_fix_sessions_and_handle_reporting.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md b/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md index f9527320e6..cb77b5aa99 100644 --- a/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md +++ b/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md @@ -4,4 +4,6 @@ Session counting incorrectly included connections to the health check or other n Warnings about file handle shortages are now handled correctly as a global resource. -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5834 \ No newline at end of file +The listening logic had its own custom rate limiting notifications. 
This has been removed and log notification is now controlled by the [standard router log rate limiting configuration](https://www.apollographql.com/docs/router/configuration/telemetry/exporters/logging/stdout/#rate_limit) + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5834 From da61b6602a3e3aaae304e4a70529a1fffada2259 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Mon, 19 Aug 2024 09:33:54 +0200 Subject: [PATCH 069/108] Fix: Update the fragment naming convention so it matches the javascript one + allow router configuration to define if generate query fragments is enabled in the rust planner (#5835) This changeset makes sure the rust planner fragment names match their javascript counterpart, and it makes sure the router configuration pertaining to fragment reuse is properly applied in the javascript and in the rust planner. --- apollo-federation/src/operation/optimize.rs | 2 +- .../fragment_autogeneration.rs | 34 +++++++++---------- .../src/query_planner/bridge_query_planner.rs | 2 +- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index de25d12746..68f8178783 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -1652,7 +1652,7 @@ impl FragmentGenerator { |condition| condition.to_string(), ); let selections = frag.selection_set.selections.len(); - let mut name = format!("_generated_on{type_condition}_{selections}"); + let mut name = format!("_generated_on{type_condition}{selections}"); let key = (type_condition, selections); let index = self diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs index 69d4d4e5b3..876334fa5d 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs +++ 
b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs @@ -53,14 +53,14 @@ fn it_respects_generate_query_fragments_option() { { t { __typename - ..._generated_onA_2_0 + ..._generated_onA2_0 ... on B { z } } } - fragment _generated_onA_2_0 on A { + fragment _generated_onA2_0 on A { x y } @@ -105,21 +105,21 @@ fn it_handles_nested_fragment_generation() { { t { __typename - ..._generated_onA_3_0 + ..._generated_onA3_0 } } - fragment _generated_onA_2_0 on A { + fragment _generated_onA2_0 on A { x y } - fragment _generated_onA_3_0 on A { + fragment _generated_onA3_0 on A { x y t { __typename - ..._generated_onA_2_0 + ..._generated_onA2_0 ... on B { z } @@ -159,11 +159,11 @@ fn it_handles_fragments_with_one_non_leaf_field() { { t { __typename - ..._generated_onA_1_0 + ..._generated_onA1_0 } } - fragment _generated_onA_1_0 on A { + fragment _generated_onA1_0 on A { t { __typename ... on B { @@ -219,11 +219,11 @@ fn it_migrates_skip_include() { { t { __typename - ..._generated_onA_3_0 + ..._generated_onA3_0 } } - fragment _generated_onA_3_0 on A { + fragment _generated_onA3_0 on A { x y t { @@ -277,15 +277,15 @@ fn it_identifies_and_reuses_equivalent_fragments_that_arent_identical() { { t { __typename - ..._generated_onA_2_0 + ..._generated_onA2_0 } t2 { __typename - ..._generated_onA_2_0 + ..._generated_onA2_0 } } - fragment _generated_onA_2_0 on A { + fragment _generated_onA2_0 on A { x y } @@ -325,20 +325,20 @@ fn fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment { t { __typename - ..._generated_onA_2_0 + ..._generated_onA2_0 } t2 { __typename - ..._generated_onA_2_1 + ..._generated_onA2_1 } } - fragment _generated_onA_2_0 on A { + fragment _generated_onA2_0 on A { x y } - fragment _generated_onA_2_1 on A { + fragment _generated_onA2_1 on A { y z } diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index 1afbb60888..cb6335afb0 100644 
--- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -182,7 +182,7 @@ impl PlannerMode { .reuse_query_fragments .unwrap_or(true), subgraph_graphql_validation: false, - generate_query_fragments: false, + generate_query_fragments: configuration.supergraph.generate_query_fragments, incremental_delivery: apollo_federation::query_plan::query_planner::QueryPlanIncrementalDeliveryConfig { enable_defer: configuration.supergraph.defer_support, From ae522fb22a2ef220696862c5765c5c3892fcaa9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Mon, 19 Aug 2024 10:05:48 +0200 Subject: [PATCH 070/108] fix: use consistent `query` name in uplink metrics (#5816) --- .changesets/fix_renee_consistent_uplink_type.md | 5 +++++ apollo-router/src/uplink/mod.rs | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 .changesets/fix_renee_consistent_uplink_type.md diff --git a/.changesets/fix_renee_consistent_uplink_type.md b/.changesets/fix_renee_consistent_uplink_type.md new file mode 100644 index 0000000000..7a6e044edf --- /dev/null +++ b/.changesets/fix_renee_consistent_uplink_type.md @@ -0,0 +1,5 @@ +### Fix inconsistent `type` attribute in `apollo.router.uplink.fetch.duration` metric ([PR #5816](https://github.com/apollographql/router/pull/5816)) + +The router now always reports a short name in the `type` attribute for the `apollo.router.uplink.fetch.duration` metric, instead of sometimes using a fully-qualified Rust path and sometimes using a short name.
+ +By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5816 diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs index d6eb3262c5..6a8974699e 100644 --- a/apollo-router/src/uplink/mod.rs +++ b/apollo-router/src/uplink/mod.rs @@ -414,7 +414,7 @@ where tracing::info!( histogram.apollo_router_uplink_fetch_duration_seconds = now.elapsed().as_secs_f64(), - query = std::any::type_name::(), + query, url = url.to_string(), "kind" = "http_error", error = e.to_string(), @@ -441,7 +441,7 @@ fn query_name() -> &'static str { let mut query = std::any::type_name::(); query = query .strip_suffix("Query") - .expect("Uplink structs mut be named xxxQuery") + .expect("Uplink structs must be named xxxQuery") .get(query.rfind("::").map(|index| index + 2).unwrap_or_default()..) .expect("cannot fail"); query From 758e502ce07cb8c6ef7d76fbf051ea8771e157b8 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 19 Aug 2024 16:31:06 +0200 Subject: [PATCH 071/108] raise the default Redis timeout to 500ms (#5795) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous default of 2ms was too low for production use cases: users keep running into issues with this low timeout with transient networking issues Co-authored-by: Renée Co-authored-by: Edward Huang --- .changesets/config_geal_raise_redis_timeouts.md | 5 +++++ Cargo.toml | 2 +- apollo-router/src/cache/redis.rs | 2 +- docs/source/configuration/distributed-caching.mdx | 4 ++-- docs/source/configuration/entity-caching.mdx | 2 +- 5 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 .changesets/config_geal_raise_redis_timeouts.md diff --git a/.changesets/config_geal_raise_redis_timeouts.md b/.changesets/config_geal_raise_redis_timeouts.md new file mode 100644 index 0000000000..3dec9af696 --- /dev/null +++ b/.changesets/config_geal_raise_redis_timeouts.md @@ -0,0 +1,5 @@ +### Increase default Redis 
timeout ([PR #5795](https://github.com/apollographql/router/pull/5795)) + +The default Redis command timeout was increased from 2ms to 500ms to accommodate common production use cases. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5795 \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 7c5fe5a189..2d5abeb92d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,4 +75,4 @@ serde_json_bytes = { version = "0.2.4", features = ["preserve_order"] } sha1 = "0.10.6" tempfile = "3.10.1" tokio = { version = "1.36.0", features = ["full"] } -tower = { version = "0.4.13", features = ["full"] } +tower = { version = "0.4.13", features = ["full"] } \ No newline at end of file diff --git a/apollo-router/src/cache/redis.rs b/apollo-router/src/cache/redis.rs index ae0697b3cf..1e14cc4247 100644 --- a/apollo-router/src/cache/redis.rs +++ b/apollo-router/src/cache/redis.rs @@ -171,7 +171,7 @@ impl RedisCacheStorage { let client = RedisClient::new( client_config, Some(PerformanceConfig { - default_command_timeout: config.timeout.unwrap_or(Duration::from_millis(2)), + default_command_timeout: config.timeout.unwrap_or(Duration::from_millis(500)), ..Default::default() }), None, diff --git a/docs/source/configuration/distributed-caching.mdx b/docs/source/configuration/distributed-caching.mdx index 2890cad126..ddc87a6fa3 100644 --- a/docs/source/configuration/distributed-caching.mdx +++ b/docs/source/configuration/distributed-caching.mdx @@ -131,7 +131,7 @@ supergraph: urls: ["redis://..."] #highlight-line username: admin/123 # Optional, can be part of the urls directly, mainly useful if you have special character like '/' in your password that doesn't work in url. This field takes precedence over the username in the URL password: admin # Optional, can be part of the urls directly, mainly useful if you have special character like '/' in your password that doesn't work in url. 
This field takes precedence over the password in the URL - timeout: 5ms # Optional, by default: 2ms + timeout: 2s # Optional, by default: 500ms ttl: 24h # Optional namespace: "prefix" # Optional #tls: @@ -141,7 +141,7 @@ supergraph: #### Timeout -Connecting and sending commands to Redis are subject to a timeout, set by default to 2ms, that can be overriden. +Connecting and sending commands to Redis are subject to a timeout, set by default to 500ms, that can be overridden. #### TTL diff --git a/docs/source/configuration/entity-caching.mdx b/docs/source/configuration/entity-caching.mdx index 53aeb930f5..1916a3ff34 100644 --- a/docs/source/configuration/entity-caching.mdx +++ b/docs/source/configuration/entity-caching.mdx @@ -91,7 +91,7 @@ preview_entity_cache: # Configure Redis redis: urls: ["redis://..."] - timeout: 5ms # Optional, by default: 2ms + timeout: 2s # Optional, by default: 500ms ttl: 24h # Optional, by default no expiration # Configure entity caching per subgraph, overrides options from the "all" section subgraphs: From d66ded2ad7edc9da014f22c41aee3fbfd03439a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Mon, 19 Aug 2024 16:31:52 +0200 Subject: [PATCH 072/108] fix: pin rowan version to pass CI (#5841) --- Cargo.lock | 1 + apollo-router/Cargo.toml | 3 +++ rust-toolchain.toml | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index b66ac53f6e..c20249f7e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -572,6 +572,7 @@ dependencies = [ "rhai", "rmp", "router-bridge", + "rowan", "rstack", "rust-embed", "rustls", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 26618a16a6..5716453c60 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -139,6 +139,9 @@ nu-ansi-term = "0.49" num-traits = "0.2.19" once_cell = "1.19.0" +# Pin rowan to a version pre-MSRV bump. Remove if we update our rust-toolchain.
+rowan = "= 0.15.15" + # Any package that starts with `opentelemetry` needs to be updated with care # because it is tightly intertwined with the `tracing` packages on account of # the `opentelemetry-tracing` package. diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b94b409b13..0c7dc7c811 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] # renovate-automation: rustc version -channel = "1.76.0" +channel = "1.76.0" # If updated, remove `rowan` dependency from apollo-router/Cargo.toml components = [ "rustfmt", "clippy" ] From 8a61130b7fb9ff57c2c5ead06a96b5a0541f5c23 Mon Sep 17 00:00:00 2001 From: Taylor Ninesling Date: Mon, 19 Aug 2024 07:57:23 -0700 Subject: [PATCH 073/108] Fix cost result filtering for custom metrics (#5838) Co-authored-by: Bryn Cooke --- .../fix_tninesling_cost_result_filtering.md | 41 ++++++ apollo-router/src/plugins/telemetry/config.rs | 8 +- .../plugins/telemetry/config_new/selectors.rs | 7 +- apollo-router/src/plugins/telemetry/mod.rs | 130 ++++++++++++++++++ .../demand_control_delta_filter.router.yaml | 47 +++++++ ...emand_control_result_attribute.router.yaml | 40 ++++++ .../demand_control_result_filter.router.yaml | 52 +++++++ 7 files changed, 323 insertions(+), 2 deletions(-) create mode 100644 .changesets/fix_tninesling_cost_result_filtering.md create mode 100644 apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml create mode 100644 apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml create mode 100644 apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml diff --git a/.changesets/fix_tninesling_cost_result_filtering.md b/.changesets/fix_tninesling_cost_result_filtering.md new file mode 100644 index 0000000000..c1a773e134 --- /dev/null +++ b/.changesets/fix_tninesling_cost_result_filtering.md @@ -0,0 +1,41 @@ +### Fix cost result filtering for custom metrics ([PR 
#5838](https://github.com/apollographql/router/pull/5838)) + +Fix filtering for custom metrics that use demand control cost information in their conditions. This allows a telemetry config such as: + +```yaml +telemetry: + instrumentation: + instruments: + supergraph: + cost.rejected.operations: + type: histogram + value: + cost: estimated + description: "Estimated cost per rejected operation." + unit: delta + condition: + eq: + - cost: result + - "COST_ESTIMATED_TOO_EXPENSIVE" +``` + +Additionally, this fixes an issue with attribute comparisons which would silently fail to compare integers to float values. Now, users can write integer values in conditions that compare against selectors that select floats: + +```yaml +telemetry: + instrumentation: + instruments: + supergraph: + cost.rejected.operations: + type: histogram + value: + cost: actual + description: "Estimated cost per rejected operation." + unit: delta + condition: + gt: + - cost: delta + - 1 +``` + +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5838 diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 797c838ed0..f360774b9b 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -4,6 +4,7 @@ use std::collections::HashSet; use axum::headers::HeaderName; use derivative::Derivative; +use num_traits::ToPrimitive; use opentelemetry::sdk::metrics::new_view; use opentelemetry::sdk::metrics::Aggregation; use opentelemetry::sdk::metrics::Instrument; @@ -495,7 +496,12 @@ impl PartialOrd for AttributeValue { (AttributeValue::F64(f1), AttributeValue::F64(f2)) => f1.partial_cmp(f2), (AttributeValue::I64(i1), AttributeValue::I64(i2)) => i1.partial_cmp(i2), (AttributeValue::String(s1), AttributeValue::String(s2)) => s1.partial_cmp(s2), - // Arrays and mismatched types are incomparable + // Mismatched numerics are comparable + (AttributeValue::F64(f1), 
AttributeValue::I64(i)) => { + i.to_f64().as_ref().and_then(|f2| f1.partial_cmp(f2)) + } + (AttributeValue::I64(i), AttributeValue::F64(f)) => i.to_f64()?.partial_cmp(f), + // Arrays and other mismatched types are incomparable _ => None, } } diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs index 875f88efb7..2f8aa1faf6 100644 --- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs @@ -224,7 +224,12 @@ impl From<&SupergraphValue> for InstrumentValue { fn from(value: &SupergraphValue) -> Self { match value { SupergraphValue::Standard(s) => InstrumentValue::Standard(s.clone()), - SupergraphValue::Custom(selector) => InstrumentValue::Custom(selector.clone()), + SupergraphValue::Custom(selector) => match selector { + SupergraphSelector::Cost { .. } => { + InstrumentValue::Chunked(Event::Custom(selector.clone())) + } + _ => InstrumentValue::Custom(selector.clone()), + }, SupergraphValue::Event(e) => InstrumentValue::Chunked(e.clone()), } } diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 6fea2af4b1..8b2aefe4a9 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -2162,6 +2162,7 @@ mod tests { use crate::error::FetchError; use crate::graphql; use crate::graphql::Error; + use crate::graphql::IntoGraphQLErrors; use crate::graphql::Request; use crate::http_ext; use crate::json_ext::Object; @@ -2170,6 +2171,8 @@ mod tests { use crate::plugin::test::MockSubgraphService; use crate::plugin::test::MockSupergraphService; use crate::plugin::DynPlugin; + use crate::plugins::demand_control::CostContext; + use crate::plugins::demand_control::DemandControlError; use crate::plugins::telemetry::config::TraceIdFormat; use crate::plugins::telemetry::handle_error_internal; use 
crate::services::router::body::get_body_bytes; @@ -3240,4 +3243,131 @@ mod tests { "04f9e396-465c-4840-bc2b-f493b8b1a7fc" ); } + + async fn make_failed_demand_control_request(plugin: &dyn DynPlugin, cost_details: CostContext) { + let mut mock_service = MockSupergraphService::new(); + mock_service + .expect_call() + .times(1) + .returning(move |req: SupergraphRequest| { + req.context.extensions().with_lock(|mut lock| { + lock.insert(cost_details.clone()); + }); + + let errors = if cost_details.result == "COST_ESTIMATED_TOO_EXPENSIVE" { + DemandControlError::EstimatedCostTooExpensive { + estimated_cost: cost_details.estimated, + max_cost: (cost_details.estimated - 5.0).max(0.0), + } + .into_graphql_errors() + .unwrap() + } else if cost_details.result == "COST_ACTUAL_TOO_EXPENSIVE" { + DemandControlError::ActualCostTooExpensive { + actual_cost: cost_details.actual, + max_cost: (cost_details.actual - 5.0).max(0.0), + } + .into_graphql_errors() + .unwrap() + } else { + Vec::new() + }; + + SupergraphResponse::fake_builder() + .context(req.context) + .data( + serde_json::to_value(graphql::Response::builder().errors(errors).build()) + .unwrap(), + ) + .build() + }); + + let mut service = plugin.supergraph_service(BoxService::new(mock_service)); + let router_req = SupergraphRequest::fake_builder().build().unwrap(); + let _router_response = service + .ready() + .await + .unwrap() + .call(router_req) + .await + .unwrap() + .next_response() + .await + .unwrap(); + } + + #[tokio::test] + async fn test_demand_control_delta_filter() { + async { + let plugin = create_plugin_with_config(include_str!( + "testdata/demand_control_delta_filter.router.yaml" + )) + .await; + make_failed_demand_control_request( + plugin.as_ref(), + CostContext { + estimated: 10.0, + actual: 8.0, + result: "COST_ACTUAL_TOO_EXPENSIVE", + strategy: "static_estimated", + }, + ) + .await; + + assert_histogram_sum!("cost.rejected.operations", 8.0); + } + .with_metrics() + .await; + } + + #[tokio::test] + async 
fn test_demand_control_result_filter() { + async { + let plugin = create_plugin_with_config(include_str!( + "testdata/demand_control_result_filter.router.yaml" + )) + .await; + make_failed_demand_control_request( + plugin.as_ref(), + CostContext { + estimated: 10.0, + actual: 0.0, + result: "COST_ESTIMATED_TOO_EXPENSIVE", + strategy: "static_estimated", + }, + ) + .await; + + assert_histogram_sum!("cost.rejected.operations", 10.0); + } + .with_metrics() + .await; + } + + #[tokio::test] + async fn test_demand_control_result_attributes() { + async { + let plugin = create_plugin_with_config(include_str!( + "testdata/demand_control_result_attribute.router.yaml" + )) + .await; + make_failed_demand_control_request( + plugin.as_ref(), + CostContext { + estimated: 10.0, + actual: 0.0, + result: "COST_ESTIMATED_TOO_EXPENSIVE", + strategy: "static_estimated", + }, + ) + .await; + + assert_histogram_sum!( + "cost.estimated", + 10.0, + "cost.result" = "COST_ESTIMATED_TOO_EXPENSIVE" + ); + } + .with_metrics() + .await; + } } diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml new file mode 100644 index 0000000000..09d2948319 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml @@ -0,0 +1,47 @@ +# Demand control enabled in measure mode. +preview_demand_control: + enabled: true + # Use measure mode to monitor the costs of your operations without rejecting any. + mode: measure + + strategy: + # Static estimated strategy has a fixed cost for elements. + static_estimated: + # The assumed returned list size for operations. Set this to the maximum number of items in a GraphQL list + list_size: 10 + # The maximum cost of a single operation, above which the operation is rejected. + max: 1000 + +# Basic telemetry configuration for cost. 
+telemetry: + exporters: + metrics: + common: + service_name: apollo-router + views: + # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry + - name: cost.* + aggregation: + histogram: + buckets: + - 0 + - 10 + - 100 + - 1000 + - 10000 + - 100000 + - 1000000 + + instrumentation: + instruments: + supergraph: + cost.rejected.operations: + type: histogram + value: + cost: actual + description: "Estimated cost per rejected operation." + unit: delta + condition: + gt: + - cost: delta + - 1 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml new file mode 100644 index 0000000000..6dc88e995c --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml @@ -0,0 +1,40 @@ +# Demand control enabled in measure mode. +preview_demand_control: + enabled: true + # Use measure mode to monitor the costs of your operations without rejecting any. + mode: measure + + strategy: + # Static estimated strategy has a fixed cost for elements. + static_estimated: + # The assumed returned list size for operations. Set this to the maximum number of items in a GraphQL list + list_size: 10 + # The maximum cost of a single operation, above which the operation is rejected. + max: 1000 + +# Basic telemetry configuration for cost. 
+telemetry: + exporters: + metrics: + common: + service_name: apollo-router + views: + # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry + - name: cost.* + aggregation: + histogram: + buckets: + - 0 + - 10 + - 100 + - 1000 + - 10000 + - 100000 + - 1000000 + + instrumentation: + instruments: + supergraph: + cost.estimated: + attributes: + cost.result: true \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml new file mode 100644 index 0000000000..1b78a1e15e --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml @@ -0,0 +1,52 @@ +# Demand control enabled in measure mode. +preview_demand_control: + enabled: true + # Use measure mode to monitor the costs of your operations without rejecting any. + mode: measure + + strategy: + # Static estimated strategy has a fixed cost for elements. + static_estimated: + # The assumed returned list size for operations. Set this to the maximum number of items in a GraphQL list + list_size: 10 + # The maximum cost of a single operation, above which the operation is rejected. + max: 1000 + +# Basic telemetry configuration for cost. +telemetry: + exporters: + metrics: + common: + service_name: apollo-router + views: + # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry + - name: cost.* + aggregation: + histogram: + buckets: + - 0 + - 10 + - 100 + - 1000 + - 10000 + - 100000 + - 1000000 + + instrumentation: + instruments: + supergraph: + # custom instrument + cost.rejected.operations: + type: histogram + value: + # Estimated cost is used to populate the histogram + cost: estimated + description: "Estimated cost per rejected operation." + unit: delta + condition: + eq: + # Only show rejected operations. 
+ - cost: result + - "COST_ESTIMATED_TOO_EXPENSIVE" + attributes: + graphql.operation.name: true # Graphql operation name is added as an attribute \ No newline at end of file From 0cd601993e8ff628a6eeebacd3216b73d6ea27a7 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 19 Aug 2024 18:25:36 +0200 Subject: [PATCH 074/108] support for clustering in the redis DEL command (#5793) When using redis in cluster mode, if we are trying to use any compounding commands like MGET or DEL, we must first split the list of keys per cluster hash, then query each redis instance according to its hash. This was already implemented for MGET, this PR fixes it for the DEL command used in invalidation. Our integration tests cannot use redis clusters yet, so this was checked with manual testing --- apollo-router/src/cache/redis.rs | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/apollo-router/src/cache/redis.rs b/apollo-router/src/cache/redis.rs index 1e14cc4247..f16130c116 100644 --- a/apollo-router/src/cache/redis.rs +++ b/apollo-router/src/cache/redis.rs @@ -562,16 +562,29 @@ impl RedisCacheStorage { } pub(crate) async fn delete(&self, keys: Vec>) -> Option { - self.inner - .del(keys) - .await - .map_err(|e| { - if !e.is_not_found() { + let mut h: HashMap> = HashMap::new(); + for key in keys.into_iter() { + let key = self.make_key(key); + let hash = ClusterRouting::hash_key(key.as_bytes()); + let entry = h.entry(hash).or_default(); + entry.push(key); + } + + // then we query all the key groups at the same time + let results: Vec> = + futures::future::join_all(h.into_values().map(|keys| self.inner.del(keys))).await; + let mut total = 0u32; + + for res in results { + match res { + Ok(res) => total += res, + Err(e) => { tracing::error!(error = %e, "redis del error"); } - e - }) - .ok() + } + } + + Some(total) } pub(crate) fn scan( From b7f1d7070bfc827f15aae2779226339a4dc0b87c Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Mon, 
19 Aug 2024 18:53:41 +0200 Subject: [PATCH 075/108] fix configuration for remote spans changeset (#5845) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .changesets/fix_bryn_remote_spans.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.changesets/fix_bryn_remote_spans.md b/.changesets/fix_bryn_remote_spans.md index bc74c68ec5..76eb80956d 100644 --- a/.changesets/fix_bryn_remote_spans.md +++ b/.changesets/fix_bryn_remote_spans.md @@ -3,6 +3,7 @@ The router now correctly propagates trace IDs when using the `propagation.request.header_name` configuration option. ```yaml +telemetry: exporters: tracing: propagation: From 5b08bc08a2ad415b6fbc16d743cfebd88b67a96f Mon Sep 17 00:00:00 2001 From: Duckki Oe Date: Mon, 19 Aug 2024 20:48:09 -0700 Subject: [PATCH 076/108] test(dual-query-planner): added description to semantic diff failures (#5828) --- .../src/query_planner/dual_query_planner.rs | 318 +++++++++++++----- 1 file changed, 241 insertions(+), 77 deletions(-) diff --git a/apollo-router/src/query_planner/dual_query_planner.rs b/apollo-router/src/query_planner/dual_query_planner.rs index 0ef5bde512..6a880cf538 100644 --- a/apollo-router/src/query_planner/dual_query_planner.rs +++ b/apollo-router/src/query_planner/dual_query_planner.rs @@ -140,17 +140,20 @@ impl BothModeComparisonJob { (Ok(js_plan), Ok(rust_plan)) => { let js_root_node = &js_plan.query_plan.node; let rust_root_node = convert_root_query_plan_node(rust_plan); - is_matched = opt_plan_node_matches(js_root_node, &rust_root_node); - if is_matched { - tracing::debug!("JS and Rust query plans match{operation_desc}! 🎉"); - } else { - tracing::debug!("JS v.s. 
Rust query plan mismatch{operation_desc}"); - tracing::debug!( - "Diff of formatted plans:\n{}", - diff_plan(js_plan, rust_plan) - ); - tracing::trace!("JS query plan Debug: {js_root_node:#?}"); - tracing::trace!("Rust query plan Debug: {rust_root_node:#?}"); + let match_result = opt_plan_node_matches(js_root_node, &rust_root_node); + is_matched = match_result.is_ok(); + match match_result { + Ok(_) => tracing::debug!("JS and Rust query plans match{operation_desc}! 🎉"), + Err(err) => { + tracing::debug!("JS v.s. Rust query plan mismatch{operation_desc}"); + tracing::debug!("{}", err.full_description()); + tracing::debug!( + "Diff of formatted plans:\n{}", + diff_plan(js_plan, rust_plan) + ); + tracing::trace!("JS query plan Debug: {js_root_node:#?}"); + tracing::trace!("Rust query plan Debug: {rust_root_node:#?}"); + } } } } @@ -168,7 +171,62 @@ impl BothModeComparisonJob { // Specific comparison functions -fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> bool { +pub struct MatchFailure { + description: String, + backtrace: std::backtrace::Backtrace, +} + +impl MatchFailure { + pub fn description(&self) -> String { + self.description.clone() + } + + pub fn full_description(&self) -> String { + format!("{}\n\nBacktrace:\n{}", self.description, self.backtrace) + } + + fn new(description: String) -> MatchFailure { + MatchFailure { + description, + backtrace: std::backtrace::Backtrace::force_capture(), + } + } + + fn add_description(self: MatchFailure, description: &str) -> MatchFailure { + MatchFailure { + description: format!("{}\n{}", self.description, description), + backtrace: self.backtrace, + } + } +} + +macro_rules! check_match { + ($pred:expr) => { + if !$pred { + return Err(MatchFailure::new(format!( + "mismatch at {}", + stringify!($pred) + ))); + } + }; +} + +macro_rules! 
check_match_eq { + ($a:expr, $b:expr) => { + if $a != $b { + let message = format!( + "mismatch between {} and {}:\nleft: {:?}\nright: {:?}", + stringify!($a), + stringify!($b), + $a, + $b + ); + return Err(MatchFailure::new(message)); + } + }; +} + +fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> Result<(), MatchFailure> { let FetchNode { service_name, requires, @@ -183,16 +241,18 @@ fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> bool { schema_aware_hash: _, // ignored authorization, } = this; - *service_name == other.service_name - && same_selection_set_sorted(requires, &other.requires) - && vec_matches_sorted(variable_usages, &other.variable_usages) - && *operation_kind == other.operation_kind - && *id == other.id - && same_rewrites(input_rewrites, &other.input_rewrites) - && same_rewrites(output_rewrites, &other.output_rewrites) - && same_rewrites(context_rewrites, &other.context_rewrites) - && *authorization == other.authorization - && operation_matches(operation, &other.operation) + + check_match_eq!(*service_name, other.service_name); + check_match_eq!(*operation_kind, other.operation_kind); + check_match_eq!(*id, other.id); + check_match_eq!(*authorization, other.authorization); + check_match!(same_selection_set_sorted(requires, &other.requires)); + check_match!(vec_matches_sorted(variable_usages, &other.variable_usages)); + check_match!(same_rewrites(input_rewrites, &other.input_rewrites)); + check_match!(same_rewrites(output_rewrites, &other.output_rewrites)); + check_match!(same_rewrites(context_rewrites, &other.context_rewrites)); + operation_matches(operation, &other.operation)?; + Ok(()) } fn subscription_primary_matches(this: &SubscriptionNode, other: &SubscriptionNode) -> bool { @@ -211,22 +271,27 @@ fn subscription_primary_matches(this: &SubscriptionNode, other: &SubscriptionNod && *operation_kind == other.operation_kind && *input_rewrites == other.input_rewrites && *output_rewrites == other.output_rewrites - && 
operation_matches(operation, &other.operation) + && operation_matches(operation, &other.operation).is_ok() } -fn operation_matches(this: &SubgraphOperation, other: &SubgraphOperation) -> bool { +fn operation_matches( + this: &SubgraphOperation, + other: &SubgraphOperation, +) -> Result<(), MatchFailure> { let this_ast = match ast::Document::parse(this.as_serialized(), "this_operation.graphql") { Ok(document) => document, Err(_) => { - // TODO: log error - return false; + return Err(MatchFailure::new( + "Failed to parse this operation".to_string(), + )); } }; let other_ast = match ast::Document::parse(other.as_serialized(), "other_operation.graphql") { Ok(document) => document, Err(_) => { - // TODO: log error - return false; + return Err(MatchFailure::new( + "Failed to parse other operation".to_string(), + )); } }; same_ast_document(&this_ast, &other_ast) @@ -236,7 +301,7 @@ fn operation_matches(this: &SubgraphOperation, other: &SubgraphOperation) -> boo // but otherwise behave just like `PartialEq`: // Note: Reexported under `apollo_router::_private` -pub fn plan_matches(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> bool { +pub fn plan_matches(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> Result<(), MatchFailure> { let js_root_node = &js_plan.query_plan.node; let rust_root_node = convert_root_query_plan_node(rust_plan); opt_plan_node_matches(js_root_node, &rust_root_node) @@ -270,10 +335,14 @@ pub fn diff_plan(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> String { fn opt_plan_node_matches( this: &Option>, other: &Option>, -) -> bool { +) -> Result<(), MatchFailure> { match (this, other) { - (None, None) => true, - (None, Some(_)) | (Some(_), None) => false, + (None, None) => Ok(()), + (None, Some(_)) | (Some(_), None) => Err(MatchFailure::new(format!( + "mismatch at opt_plan_node_matches\nleft: {:?}\nright: {:?}", + this.is_some(), + other.is_some() + ))), (Some(this), Some(other)) => plan_node_matches(this.borrow(), other.borrow()), } } @@ 
-283,6 +352,22 @@ fn vec_matches(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool && std::iter::zip(this, other).all(|(this, other)| item_matches(this, other)) } +fn vec_matches_result( + this: &[T], + other: &[T], + item_matches: impl Fn(&T, &T) -> Result<(), MatchFailure>, +) -> Result<(), MatchFailure> { + check_match_eq!(this.len(), other.len()); + std::iter::zip(this, other) + .enumerate() + .try_fold((), |_acc, (index, (this, other))| { + item_matches(this, other) + .map_err(|err| err.add_description(&format!("under item[{}]", index))) + })?; + assert!(vec_matches(this, other, |a, b| item_matches(a, b).is_ok())); + Ok(()) +} + fn vec_matches_sorted(this: &[T], other: &[T]) -> bool { let mut this_sorted = this.to_owned(); let mut other_sorted = other.to_owned(); @@ -318,16 +403,65 @@ fn vec_matches_as_set(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) }) } -fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> bool { +fn vec_matches_result_as_set( + this: &[T], + other: &[T], + item_matches: impl Fn(&T, &T) -> bool, +) -> Result<(), MatchFailure> { + // Set-inclusion test in both directions + check_match_eq!(this.len(), other.len()); + for (index, this_node) in this.iter().enumerate() { + if !other + .iter() + .any(|other_node| item_matches(this_node, other_node)) + { + return Err(MatchFailure::new(format!( + "mismatched set: missing item[{}]", + index + ))); + } + } + for other_node in other.iter() { + if !this + .iter() + .any(|this_node| item_matches(this_node, other_node)) + { + return Err(MatchFailure::new( + "mismatched set: extra item found".to_string(), + )); + } + } + assert!(vec_matches_as_set(this, other, item_matches)); + Ok(()) +} + +fn option_to_string(name: Option) -> String { + name.map_or_else(|| "".to_string(), |name| name.to_string()) +} + +fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> Result<(), MatchFailure> { match (this, other) { (PlanNode::Sequence { nodes: this }, PlanNode::Sequence { nodes: 
other }) => { - vec_matches(this, other, plan_node_matches) + vec_matches_result(this, other, plan_node_matches) + .map_err(|err| err.add_description("under Sequence node"))?; } (PlanNode::Parallel { nodes: this }, PlanNode::Parallel { nodes: other }) => { - vec_matches_as_set(this, other, plan_node_matches) + vec_matches_result_as_set(this, other, |a, b| plan_node_matches(a, b).is_ok()) + .map_err(|err| err.add_description("under Parallel node"))?; + } + (PlanNode::Fetch(this), PlanNode::Fetch(other)) => { + fetch_node_matches(this, other).map_err(|err| { + err.add_description(&format!( + "under Fetch node (operation name: {})", + option_to_string(this.operation_name.as_ref()) + )) + })?; + } + (PlanNode::Flatten(this), PlanNode::Flatten(other)) => { + flatten_node_matches(this, other).map_err(|err| { + err.add_description(&format!("under Flatten node (path: {})", this.path)) + })?; } - (PlanNode::Fetch(this), PlanNode::Fetch(other)) => fetch_node_matches(this, other), - (PlanNode::Flatten(this), PlanNode::Flatten(other)) => flatten_node_matches(this, other), ( PlanNode::Defer { primary, deferred }, PlanNode::Defer { @@ -335,8 +469,8 @@ fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> bool { deferred: other_deferred, }, ) => { - defer_primary_node_matches(primary, other_primary) - && vec_matches(deferred, other_deferred, deferred_node_matches) + check_match!(defer_primary_node_matches(primary, other_primary)); + check_match!(vec_matches(deferred, other_deferred, deferred_node_matches)); } ( PlanNode::Subscription { primary, rest }, @@ -345,8 +479,9 @@ fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> bool { rest: other_rest, }, ) => { - subscription_primary_matches(primary, other_primary) - && opt_plan_node_matches(rest, other_rest) + check_match!(subscription_primary_matches(primary, other_primary)); + opt_plan_node_matches(rest, other_rest) + .map_err(|err| err.add_description("under Subscription"))?; } ( PlanNode::Condition { @@ -360,17 
+495,25 @@ fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> bool { else_clause: other_else_clause, }, ) => { - condition == other_condition - && opt_plan_node_matches(if_clause, other_if_clause) - && opt_plan_node_matches(else_clause, other_else_clause) + check_match_eq!(condition, other_condition); + opt_plan_node_matches(if_clause, other_if_clause) + .map_err(|err| err.add_description("under Condition node (if_clause)"))?; + opt_plan_node_matches(else_clause, other_else_clause) + .map_err(|err| err.add_description("under Condition node (else_clause)"))?; } - _ => false, - } + _ => { + return Err(MatchFailure::new(format!( + "mismatched plan node types\nleft: {:?}\nright: {:?}", + this, other + ))) + } + }; + Ok(()) } fn defer_primary_node_matches(this: &Primary, other: &Primary) -> bool { let Primary { subselection, node } = this; - *subselection == other.subselection && opt_plan_node_matches(node, &other.node) + *subselection == other.subselection && opt_plan_node_matches(node, &other.node).is_ok() } fn deferred_node_matches(this: &DeferredNode, other: &DeferredNode) -> bool { @@ -385,12 +528,13 @@ fn deferred_node_matches(this: &DeferredNode, other: &DeferredNode) -> bool { && *label == other.label && *query_path == other.query_path && *subselection == other.subselection - && opt_plan_node_matches(node, &other.node) + && opt_plan_node_matches(node, &other.node).is_ok() } -fn flatten_node_matches(this: &FlattenNode, other: &FlattenNode) -> bool { +fn flatten_node_matches(this: &FlattenNode, other: &FlattenNode) -> Result<(), MatchFailure> { let FlattenNode { path, node } = this; - *path == other.path && plan_node_matches(node, &other.node) + check_match_eq!(*path, other.path); + plan_node_matches(node, &other.node) } // Copied and modified from `apollo_federation::operation::SelectionKey` @@ -478,7 +622,7 @@ fn same_rewrites(x: &Option>, y: &Option>) -> //================================================================================================== 
// AST comparison functions -fn same_ast_document(x: &ast::Document, y: &ast::Document) -> bool { +fn same_ast_document(x: &ast::Document, y: &ast::Document) -> Result<(), MatchFailure> { fn split_definitions( doc: &ast::Document, ) -> ( @@ -510,34 +654,54 @@ fn same_ast_document(x: &ast::Document, y: &ast::Document) -> bool { "Different number of operation definitions" ); - x_ops.len() == y_ops.len() - && x_ops - .iter() - .zip(y_ops.iter()) - .all(|(x_op, y_op)| same_ast_operation_definition(x_op, y_op)) - && x_frags.len() == y_frags.len() - && x_frags - .iter() - .zip(y_frags.iter()) - .all(|(x_frag, y_frag)| same_ast_fragment_definition(x_frag, y_frag)) + check_match_eq!(x_ops.len(), y_ops.len()); + x_ops + .iter() + .zip(y_ops.iter()) + .try_fold((), |_, (x_op, y_op)| { + same_ast_operation_definition(x_op, y_op) + .map_err(|err| err.add_description("under operation definition")) + })?; + check_match_eq!(x_frags.len(), y_frags.len()); + x_frags + .iter() + .zip(y_frags.iter()) + .try_fold((), |_, (x_frag, y_frag)| { + same_ast_fragment_definition(x_frag, y_frag) + .map_err(|err| err.add_description("under fragment definition")) + })?; + Ok(()) } fn same_ast_operation_definition( x: &ast::OperationDefinition, y: &ast::OperationDefinition, -) -> bool { +) -> Result<(), MatchFailure> { // Note: Operation names are ignored, since parallel fetches may have different names. 
- x.operation_type == y.operation_type - && vec_matches_sorted_by(&x.variables, &y.variables, |x, y| x.name.cmp(&y.name)) - && x.directives == y.directives - && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set) -} - -fn same_ast_fragment_definition(x: &ast::FragmentDefinition, y: &ast::FragmentDefinition) -> bool { - x.name == y.name - && x.type_condition == y.type_condition - && x.directives == y.directives - && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set) + check_match_eq!(x.operation_type, y.operation_type); + check_match!(vec_matches_sorted_by(&x.variables, &y.variables, |x, y| x + .name + .cmp(&y.name))); + check_match_eq!(x.directives, y.directives); + check_match!(same_ast_selection_set_sorted( + &x.selection_set, + &y.selection_set + )); + Ok(()) +} + +fn same_ast_fragment_definition( + x: &ast::FragmentDefinition, + y: &ast::FragmentDefinition, +) -> Result<(), MatchFailure> { + check_match_eq!(x.name, y.name); + check_match_eq!(x.type_condition, y.type_condition); + check_match_eq!(x.directives, y.directives); + check_match!(same_ast_selection_set_sorted( + &x.selection_set, + &y.selection_set + )); + Ok(()) } fn get_ast_selection_key(selection: &ast::Selection) -> SelectionKey { @@ -617,7 +781,7 @@ mod ast_comparison_tests { let op_y = r#"query($qv1: Int!, $qv2: String!) 
{ x(arg1: $qv1, arg2: $qv2) }"#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y)); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); } #[test] @@ -634,7 +798,7 @@ mod ast_comparison_tests { "#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y)); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); } #[test] @@ -643,7 +807,7 @@ mod ast_comparison_tests { let op_y = r#"{ y x { z w } }"#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y)); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); } #[test] @@ -652,6 +816,6 @@ mod ast_comparison_tests { let op_y = r#"{ q { ...f1 ...f2 } } fragment f2 on T { w z } fragment f1 on T { x y }"#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y)); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); } } From 50a56f129c5c13883c68b354c2e14d0ac8d7f423 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Tue, 20 Aug 2024 09:29:37 +0200 Subject: [PATCH 077/108] chore(federation): introduce sorted `ArgumentList`, `DirectiveList` types (#5825) --- apollo-federation/src/operation/contains.rs | 203 +-------- .../src/operation/directive_list.rs | 410 ++++++++++++++++++ apollo-federation/src/operation/mod.rs | 244 +++++++---- apollo-federation/src/operation/optimize.rs | 15 +- apollo-federation/src/operation/simplify.rs | 34 +- apollo-federation/src/operation/tests/mod.rs | 24 +- .../src/query_graph/graph_path.rs | 47 +- .../src/query_graph/path_tree.rs | 5 +- .../src/query_plan/conditions.rs | 30 +- .../src/query_plan/fetch_dependency_graph.rs | 
24 +- .../fetch_dependency_graph_processor.rs | 6 +- 11 files changed, 642 insertions(+), 400 deletions(-) create mode 100644 apollo-federation/src/operation/directive_list.rs diff --git a/apollo-federation/src/operation/contains.rs b/apollo-federation/src/operation/contains.rs index 9b306504a2..e69f978b3f 100644 --- a/apollo-federation/src/operation/contains.rs +++ b/apollo-federation/src/operation/contains.rs @@ -1,7 +1,4 @@ -use apollo_compiler::collections::IndexMap; use apollo_compiler::executable; -use apollo_compiler::Name; -use apollo_compiler::Node; use super::FieldSelection; use super::FragmentSpreadSelection; @@ -10,202 +7,6 @@ use super::InlineFragmentSelection; use super::Selection; use super::SelectionSet; -/// Compare two input values, with two special cases for objects: assuming no duplicate keys, -/// and order-independence. -/// -/// This comes from apollo-rs: https://github.com/apollographql/apollo-rs/blob/6825be88fe13cd0d67b83b0e4eb6e03c8ab2555e/crates/apollo-compiler/src/validation/selection.rs#L160-L188 -/// Hopefully we can do this more easily in the future! 
-fn same_value(left: &executable::Value, right: &executable::Value) -> bool { - use apollo_compiler::executable::Value; - match (left, right) { - (Value::Null, Value::Null) => true, - (Value::Enum(left), Value::Enum(right)) => left == right, - (Value::Variable(left), Value::Variable(right)) => left == right, - (Value::String(left), Value::String(right)) => left == right, - (Value::Float(left), Value::Float(right)) => left == right, - (Value::Int(left), Value::Int(right)) => left == right, - (Value::Boolean(left), Value::Boolean(right)) => left == right, - (Value::List(left), Value::List(right)) if left.len() == right.len() => left - .iter() - .zip(right.iter()) - .all(|(left, right)| same_value(left, right)), - (Value::Object(left), Value::Object(right)) if left.len() == right.len() => { - left.iter().all(|(key, value)| { - right - .iter() - .find(|(other_key, _)| key == other_key) - .is_some_and(|(_, other_value)| same_value(value, other_value)) - }) - } - _ => false, - } -} - -/// Sort an input value, which means specifically sorting their object values by keys (assuming no -/// duplicates). This is used for hashing input values in a way consistent with [same_value()]. -fn sort_value(value: &mut executable::Value) { - use apollo_compiler::executable::Value; - match value { - Value::List(elems) => { - elems - .iter_mut() - .for_each(|value| sort_value(value.make_mut())); - } - Value::Object(pairs) => { - pairs - .iter_mut() - .for_each(|(_, value)| sort_value(value.make_mut())); - pairs.sort_by(|left, right| left.0.cmp(&right.0)); - } - _ => {} - } -} - -/// Compare sorted input values, which means specifically establishing an order between the variants -/// of input values, and comparing values for the same variants accordingly. This is used for -/// hashing directives in a way consistent with [same_directives()]. -/// -/// Note that Floats and Ints are compared textually and not parsed numerically. This is fine for -/// the purposes of hashing. 
For object comparison semantics, see [compare_sorted_object_pairs()]. -fn compare_sorted_value(left: &executable::Value, right: &executable::Value) -> std::cmp::Ordering { - use apollo_compiler::executable::Value; - fn discriminant(value: &Value) -> u8 { - match value { - Value::Null => 0, - Value::Enum(_) => 1, - Value::Variable(_) => 2, - Value::String(_) => 3, - Value::Float(_) => 4, - Value::Int(_) => 5, - Value::Boolean(_) => 6, - Value::List(_) => 7, - Value::Object(_) => 8, - } - } - match (left, right) { - (Value::Null, Value::Null) => std::cmp::Ordering::Equal, - (Value::Enum(left), Value::Enum(right)) => left.cmp(right), - (Value::Variable(left), Value::Variable(right)) => left.cmp(right), - (Value::String(left), Value::String(right)) => left.cmp(right), - (Value::Float(left), Value::Float(right)) => left.as_str().cmp(right.as_str()), - (Value::Int(left), Value::Int(right)) => left.as_str().cmp(right.as_str()), - (Value::Boolean(left), Value::Boolean(right)) => left.cmp(right), - (Value::List(left), Value::List(right)) => left.len().cmp(&right.len()).then_with(|| { - left.iter() - .zip(right) - .map(|(left, right)| compare_sorted_value(left, right)) - .find(|o| o.is_ne()) - .unwrap_or(std::cmp::Ordering::Equal) - }), - (Value::Object(left), Value::Object(right)) => compare_sorted_name_value_pairs( - left.iter().map(|pair| &pair.0), - left.iter().map(|pair| &pair.1), - right.iter().map(|pair| &pair.0), - right.iter().map(|pair| &pair.1), - ), - _ => discriminant(left).cmp(&discriminant(right)), - } -} - -/// Compare the (name, value) pair iterators, which are assumed to be sorted by name and have sorted -/// values. This is used for hashing objects/arguments in a way consistent with [same_directives()]. -/// -/// Note that pair iterators are compared by length, then lexicographically by name, then finally -/// recursively by value. This is intended to compute an ordering quickly for hashing. 
-fn compare_sorted_name_value_pairs<'doc>( - left_names: impl ExactSizeIterator, - left_values: impl ExactSizeIterator>, - right_names: impl ExactSizeIterator, - right_values: impl ExactSizeIterator>, -) -> std::cmp::Ordering { - left_names - .len() - .cmp(&right_names.len()) - .then_with(|| left_names.cmp(right_names)) - .then_with(|| { - left_values - .zip(right_values) - .map(|(left, right)| compare_sorted_value(left, right)) - .find(|o| o.is_ne()) - .unwrap_or(std::cmp::Ordering::Equal) - }) -} - -/// Returns true if two argument lists are equivalent. -/// -/// The arguments and values must be the same, independent of order. -fn same_arguments( - left: &[Node], - right: &[Node], -) -> bool { - if left.len() != right.len() { - return false; - } - - let right = right - .iter() - .map(|arg| (&arg.name, arg)) - .collect::>(); - - left.iter().all(|arg| { - right - .get(&arg.name) - .is_some_and(|right_arg| same_value(&arg.value, &right_arg.value)) - }) -} - -/// Sort arguments, which means specifically sorting arguments by names and object values by keys -/// (assuming no duplicates). This is used for hashing arguments in a way consistent with -/// [same_arguments()]. -pub(super) fn sort_arguments(arguments: &mut [Node]) { - arguments - .iter_mut() - .for_each(|arg| sort_value(arg.make_mut().value.make_mut())); - arguments.sort_by(|left, right| left.name.cmp(&right.name)); -} - -/// Compare sorted arguments; see [compare_sorted_name_value_pairs()] for semantics. This is used -/// for hashing directives in a way consistent with [same_directives()]. -fn compare_sorted_arguments( - left: &[Node], - right: &[Node], -) -> std::cmp::Ordering { - compare_sorted_name_value_pairs( - left.iter().map(|arg| &arg.name), - left.iter().map(|arg| &arg.value), - right.iter().map(|arg| &arg.name), - right.iter().map(|arg| &arg.value), - ) -} - -/// Returns true if two directive lists are equivalent, independent of order. 
-fn same_directives(left: &executable::DirectiveList, right: &executable::DirectiveList) -> bool { - if left.len() != right.len() { - return false; - } - - left.iter().all(|left_directive| { - right.iter().any(|right_directive| { - left_directive.name == right_directive.name - && same_arguments(&left_directive.arguments, &right_directive.arguments) - }) - }) -} - -/// Sort directives, which means specifically sorting their arguments, sorting the directives by -/// name, and then breaking directive-name ties by comparing sorted arguments. This is used for -/// hashing arguments in a way consistent with [same_directives()]. -pub(super) fn sort_directives(directives: &mut executable::DirectiveList) { - directives - .iter_mut() - .for_each(|directive| sort_arguments(&mut directive.make_mut().arguments)); - directives.sort_by(|left, right| { - left.name - .cmp(&right.name) - .then_with(|| compare_sorted_arguments(&left.arguments, &right.arguments)) - }); -} - pub(super) fn is_deferred_selection(directives: &executable::DirectiveList) -> bool { directives.has("defer") } @@ -277,8 +78,8 @@ impl FieldSelection { pub fn containment(&self, other: &FieldSelection, options: ContainmentOptions) -> Containment { if self.field.name() != other.field.name() || self.field.alias != other.field.alias - || !same_arguments(&self.field.arguments, &other.field.arguments) - || !same_directives(&self.field.directives, &other.field.directives) + || self.field.arguments != other.field.arguments + || self.field.directives != other.field.directives { return Containment::NotContained; } diff --git a/apollo-federation/src/operation/directive_list.rs b/apollo-federation/src/operation/directive_list.rs new file mode 100644 index 0000000000..913a1184e6 --- /dev/null +++ b/apollo-federation/src/operation/directive_list.rs @@ -0,0 +1,410 @@ +use std::fmt; +use std::fmt::Display; +use std::hash::BuildHasher; +use std::hash::Hash; +use std::hash::Hasher; +use std::ops::Deref; +use std::sync::Arc; +use 
std::sync::OnceLock; + +use apollo_compiler::executable; +use apollo_compiler::Name; +use apollo_compiler::Node; + +use super::sort_arguments; + +/// Compare sorted input values, which means specifically establishing an order between the variants +/// of input values, and comparing values for the same variants accordingly. +/// +/// Note that Floats and Ints are compared textually and not parsed numerically. This is fine for +/// the purposes of hashing. +fn compare_sorted_value(left: &executable::Value, right: &executable::Value) -> std::cmp::Ordering { + use apollo_compiler::executable::Value; + /// Returns an arbitrary index for each value type so values of different types are sorted consistently. + fn discriminant(value: &Value) -> u8 { + match value { + Value::Null => 0, + Value::Enum(_) => 1, + Value::Variable(_) => 2, + Value::String(_) => 3, + Value::Float(_) => 4, + Value::Int(_) => 5, + Value::Boolean(_) => 6, + Value::List(_) => 7, + Value::Object(_) => 8, + } + } + match (left, right) { + (Value::Null, Value::Null) => std::cmp::Ordering::Equal, + (Value::Enum(left), Value::Enum(right)) => left.cmp(right), + (Value::Variable(left), Value::Variable(right)) => left.cmp(right), + (Value::String(left), Value::String(right)) => left.cmp(right), + (Value::Float(left), Value::Float(right)) => left.as_str().cmp(right.as_str()), + (Value::Int(left), Value::Int(right)) => left.as_str().cmp(right.as_str()), + (Value::Boolean(left), Value::Boolean(right)) => left.cmp(right), + (Value::List(left), Value::List(right)) => left.len().cmp(&right.len()).then_with(|| { + left.iter() + .zip(right) + .map(|(left, right)| compare_sorted_value(left, right)) + .find(|o| o.is_ne()) + .unwrap_or(std::cmp::Ordering::Equal) + }), + (Value::Object(left), Value::Object(right)) => compare_sorted_name_value_pairs( + left.iter().map(|pair| &pair.0), + left.iter().map(|pair| &pair.1), + right.iter().map(|pair| &pair.0), + right.iter().map(|pair| &pair.1), + ), + _ => 
discriminant(left).cmp(&discriminant(right)), + } +} + +/// Compare the (name, value) pair iterators, which are assumed to be sorted by name and have sorted +/// values. This is used for hashing objects/arguments in a way consistent with [same_directives()]. +/// +/// Note that pair iterators are compared by length, then lexicographically by name, then finally +/// recursively by value. This is intended to compute an ordering quickly for hashing. +fn compare_sorted_name_value_pairs<'doc>( + left_names: impl ExactSizeIterator, + left_values: impl ExactSizeIterator>, + right_names: impl ExactSizeIterator, + right_values: impl ExactSizeIterator>, +) -> std::cmp::Ordering { + left_names + .len() + .cmp(&right_names.len()) + .then_with(|| left_names.cmp(right_names)) + .then_with(|| { + left_values + .zip(right_values) + .map(|(left, right)| compare_sorted_value(left, right)) + .find(|o| o.is_ne()) + .unwrap_or(std::cmp::Ordering::Equal) + }) +} + +/// Compare sorted arguments; see [compare_sorted_name_value_pairs()] for semantics. This is used +/// for hashing directives in a way consistent with [same_directives()]. +fn compare_sorted_arguments( + left: &[Node], + right: &[Node], +) -> std::cmp::Ordering { + compare_sorted_name_value_pairs( + left.iter().map(|arg| &arg.name), + left.iter().map(|arg| &arg.value), + right.iter().map(|arg| &arg.name), + right.iter().map(|arg| &arg.value), + ) +} + +/// An empty apollo-compiler directive list that we can return a reference to when a +/// [`DirectiveList`] is in the empty state. +static EMPTY_DIRECTIVE_LIST: executable::DirectiveList = executable::DirectiveList(vec![]); + +/// Contents for a non-empty directive list. +#[derive(Debug, Clone)] +struct DirectiveListInner { + // Cached hash: hashing may be expensive with deeply nested values or very many directives, + // so we only want to do it once. 
+ // The hash is eagerly precomputed because we expect to, most of the time, hash a DirectiveList + // at least once (when inserting its selection into a selection map). + hash: u64, + // Mutable access to the underlying directive list should not be handed out because `sort_order` + // may get out of sync. + directives: executable::DirectiveList, + sort_order: Vec, +} + +impl PartialEq for DirectiveListInner { + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash + && self + .iter_sorted() + .zip(other.iter_sorted()) + .all(|(left, right)| { + // We can just use `Eq` because the arguments are sorted recursively + left.name == right.name && left.arguments == right.arguments + }) + } +} + +impl Eq for DirectiveListInner {} + +impl DirectiveListInner { + fn rehash(&mut self) { + static SHARED_RANDOM: OnceLock = OnceLock::new(); + + let mut state = SHARED_RANDOM.get_or_init(Default::default).build_hasher(); + self.len().hash(&mut state); + // Hash in sorted order + for d in self.iter_sorted() { + d.hash(&mut state); + } + self.hash = state.finish(); + } + + fn len(&self) -> usize { + self.directives.len() + } + + fn iter_sorted(&self) -> DirectiveIterSorted<'_> { + DirectiveIterSorted { + directives: &self.directives.0, + inner: self.sort_order.iter(), + } + } +} + +/// A list of directives, with order-independent hashing and equality. +/// +/// Original order of directive applications is stored but is not part of hashing, +/// so it may not be maintained exactly when round-tripping several directive lists +/// through a HashSet for example. +/// +/// Arguments and input object values provided to directives are all sorted and the +/// original order is not tracked. +/// +/// This list is cheaply cloneable, but not intended for frequent mutations. +/// When the list is empty, it does not require an allocation. 
+#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub(crate) struct DirectiveList { + inner: Option>, +} + +impl Deref for DirectiveList { + type Target = executable::DirectiveList; + fn deref(&self) -> &Self::Target { + self.inner + .as_ref() + .map_or(&EMPTY_DIRECTIVE_LIST, |inner| &inner.directives) + } +} + +impl Hash for DirectiveList { + fn hash(&self, state: &mut H) { + state.write_u64(self.inner.as_ref().map_or(0, |inner| inner.hash)) + } +} + +impl Display for DirectiveList { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(inner) = &self.inner { + inner.directives.fmt(f) + } else { + Ok(()) + } + } +} + +impl From for DirectiveList { + fn from(mut directives: executable::DirectiveList) -> Self { + if directives.is_empty() { + return Self::new(); + } + + // Sort directives, which means specifically sorting their arguments, sorting the directives by + // name, and then breaking directive-name ties by comparing sorted arguments. This is used for + // hashing arguments in a way consistent with [same_directives()]. + + for directive in directives.iter_mut() { + sort_arguments(&mut directive.make_mut().arguments); + } + + let mut sort_order = (0usize..directives.len()).collect::>(); + sort_order.sort_by(|left, right| { + let left = &directives[*left]; + let right = &directives[*right]; + left.name + .cmp(&right.name) + .then_with(|| compare_sorted_arguments(&left.arguments, &right.arguments)) + }); + + let mut partially_initialized = DirectiveListInner { + hash: 0, + directives, + sort_order, + }; + partially_initialized.rehash(); + Self { + inner: Some(Arc::new(partially_initialized)), + } + } +} + +impl FromIterator> for DirectiveList { + fn from_iter>>(iter: T) -> Self { + Self::from(executable::DirectiveList::from_iter(iter)) + } +} + +impl FromIterator for DirectiveList { + fn from_iter>(iter: T) -> Self { + Self::from(executable::DirectiveList::from_iter(iter)) + } +} + +impl DirectiveList { + /// Create an empty directive list. 
+ pub(crate) const fn new() -> Self { + Self { inner: None } + } + + /// Create a directive list with a single directive. + /// + /// This sorts arguments and input object values provided to the directive. + pub(crate) fn one(directive: impl Into>) -> Self { + std::iter::once(directive.into()).collect() + } + + #[cfg(test)] + pub(crate) fn parse(input: &str) -> Self { + use apollo_compiler::ast; + let input = format!( + r#"query {{ field +# Directive input: +{input} +# +}}"# + ); + let mut parser = apollo_compiler::parser::Parser::new(); + let document = parser + .parse_ast(&input, "DirectiveList::parse.graphql") + .unwrap(); + let Some(ast::Definition::OperationDefinition(operation)) = document.definitions.first() + else { + unreachable!(); + }; + let Some(ast::Selection::Field(field)) = operation.selection_set.first() else { + unreachable!(); + }; + field.directives.clone().into() + } + + /// Iterate the directives in their original order. + pub(crate) fn iter(&self) -> impl ExactSizeIterator> { + self.inner + .as_ref() + .map_or(&EMPTY_DIRECTIVE_LIST, |inner| &inner.directives) + .iter() + } + + /// Iterate the directives in a consistent sort order. + pub(crate) fn iter_sorted(&self) -> DirectiveIterSorted<'_> { + self.inner + .as_ref() + .map_or_else(DirectiveIterSorted::empty, |inner| inner.iter_sorted()) + } + + /// Remove one directive application by name. + /// + /// To remove a repeatable directive, you may need to call this multiple times. + pub(crate) fn remove_one(&mut self, name: &str) -> Option> { + let Some(inner) = self.inner.as_mut() else { + // Nothing to do on an empty list + return None; + }; + let Some(index) = inner.directives.iter().position(|dir| dir.name == name) else { + return None; + }; + + // The directive exists and is the only directive: switch to the empty representation + if inner.len() == 1 { + // The index is guaranteed to exist so we can safely use the panicky [] syntax. 
+ let item = inner.directives[index].clone(); + self.inner = None; + return Some(item); + } + + // The directive exists: clone the inner structure if necessary. + let inner = Arc::make_mut(inner); + let sort_index = inner + .sort_order + .iter() + .position(|sorted| *sorted == index) + .expect("index must exist in sort order"); + let item = inner.directives.remove(index); + inner.sort_order.remove(sort_index); + + for order in &mut inner.sort_order { + if *order > index { + *order -= 1; + } + } + inner.rehash(); + Some(item) + } +} + +/// Iterate over a [`DirectiveList`] in a consistent sort order. +pub(crate) struct DirectiveIterSorted<'a> { + directives: &'a [Node], + inner: std::slice::Iter<'a, usize>, +} +impl<'a> Iterator for DirectiveIterSorted<'a> { + type Item = &'a Node; + + fn next(&mut self) -> Option { + self.inner.next().map(|index| &self.directives[*index]) + } +} + +impl ExactSizeIterator for DirectiveIterSorted<'_> { + fn len(&self) -> usize { + self.inner.len() + } +} + +impl DirectiveIterSorted<'_> { + fn empty() -> Self { + Self { + directives: &[], + inner: [].iter(), + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::*; + + #[test] + fn consistent_hash() { + let mut set = HashSet::new(); + + assert!(set.insert(DirectiveList::new())); + assert!(!set.insert(DirectiveList::new())); + + assert!(set.insert(DirectiveList::parse("@a @b"))); + assert!(!set.insert(DirectiveList::parse("@b @a"))); + } + + #[test] + fn order_independent_equality() { + assert_eq!(DirectiveList::new(), DirectiveList::new()); + assert_eq!( + DirectiveList::parse("@a @b"), + DirectiveList::parse("@b @a"), + "equality should be order independent" + ); + + assert_eq!( + DirectiveList::parse("@a(arg1: true, arg2: false) @b(arg2: false, arg1: true)"), + DirectiveList::parse("@b(arg1: true, arg2: false) @a(arg1: true, arg2: false)"), + "arguments should be order independent" + ); + + assert_eq!( + DirectiveList::parse("@nested(object: { a: 1, 
b: 2, c: 3 })"), + DirectiveList::parse("@nested(object: { b: 2, c: 3, a: 1 })"), + "input objects should be order independent" + ); + + assert_eq!( + DirectiveList::parse("@nested(object: [true, { a: 1, b: 2, c: { a: 3 } }])"), + DirectiveList::parse("@nested(object: [true, { b: 2, c: { a: 3 }, a: 1 }])"), + "input objects should be order independent" + ); + } +} diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index a6ed20d3f6..71b4aece8b 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -19,7 +19,6 @@ use std::hash::Hash; use std::ops::Deref; use std::sync::atomic; use std::sync::Arc; -use std::sync::OnceLock; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; @@ -48,6 +47,7 @@ use crate::schema::position::SchemaRootDefinitionKind; use crate::schema::ValidFederationSchema; mod contains; +mod directive_list; mod merging; mod optimize; mod rebase; @@ -56,6 +56,7 @@ mod simplify; mod tests; pub(crate) use contains::*; +pub(crate) use directive_list::DirectiveList; pub(crate) use merging::*; pub(crate) use rebase::*; @@ -81,6 +82,101 @@ impl SelectionId { } } +/// A list of arguments to a field or directive. +/// +/// All arguments and input object values are sorted in a consistent order. +/// +/// This type is immutable and cheaply cloneable. +#[derive(Clone, PartialEq, Eq, Default)] +pub(crate) struct ArgumentList { + /// The inner list *must* be sorted with `sort_arguments`. + inner: Option]>>, +} + +impl std::fmt::Debug for ArgumentList { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + // Print the slice representation. + self.deref().fmt(f) + } +} + +/// Sort an input value, which means specifically sorting their object values by keys (assuming no +/// duplicates). +/// +/// After sorting, hashing and plain-Rust equality have the expected result for values that are +/// spec-equivalent. 
+fn sort_value(value: &mut executable::Value) { + use apollo_compiler::executable::Value; + match value { + Value::List(elems) => { + elems + .iter_mut() + .for_each(|value| sort_value(value.make_mut())); + } + Value::Object(pairs) => { + pairs + .iter_mut() + .for_each(|(_, value)| sort_value(value.make_mut())); + pairs.sort_by(|left, right| left.0.cmp(&right.0)); + } + _ => {} + } +} + +/// Sort arguments, which means specifically sorting arguments by names and object values by keys +/// (assuming no duplicates). +/// +/// After sorting, hashing and plain-Rust equality have the expected result for lists that are +/// spec-equivalent. +fn sort_arguments(arguments: &mut [Node]) { + arguments + .iter_mut() + .for_each(|arg| sort_value(arg.make_mut().value.make_mut())); + arguments.sort_by(|left, right| left.name.cmp(&right.name)); +} + +impl From>> for ArgumentList { + fn from(mut arguments: Vec>) -> Self { + if arguments.is_empty() { + return Self::new(); + } + + sort_arguments(&mut arguments); + + Self { + inner: Some(Arc::from(arguments)), + } + } +} + +impl FromIterator> for ArgumentList { + fn from_iter>>(iter: T) -> Self { + Self::from(Vec::from_iter(iter)) + } +} + +impl Deref for ArgumentList { + type Target = [Node]; + + fn deref(&self) -> &Self::Target { + self.inner.as_deref().unwrap_or_default() + } +} + +impl ArgumentList { + /// Create an empty argument list. + pub(crate) const fn new() -> Self { + Self { inner: None } + } + + /// Create a argument list with a single argument. + /// + /// This sorts any input object values provided to the argument. + pub(crate) fn one(argument: impl Into>) -> Self { + Self::from(vec![argument.into()]) + } +} + /// An analogue of the apollo-compiler type `Operation` with these changes: /// - Stores the schema that the operation is queried against. /// - Swaps `operation_type` with `root_kind` (using the analogous apollo-federation type). 
@@ -93,7 +189,7 @@ pub struct Operation { pub(crate) root_kind: SchemaRootDefinitionKind, pub(crate) name: Option, pub(crate) variables: Arc>>, - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, pub(crate) selection_set: SelectionSet, pub(crate) named_fragments: NamedFragments, } @@ -138,7 +234,7 @@ impl Operation { root_kind: operation.operation_type.into(), name: operation.name.clone(), variables: Arc::new(operation.variables.clone()), - directives: Arc::new(operation.directives.clone()), + directives: operation.directives.clone().into(), selection_set, named_fragments, }) @@ -215,7 +311,6 @@ mod selection_map { use std::sync::Arc; use apollo_compiler::collections::IndexMap; - use apollo_compiler::executable; use serde::Serialize; use crate::error::FederationError; @@ -223,6 +318,7 @@ mod selection_map { use crate::operation::field_selection::FieldSelection; use crate::operation::fragment_spread_selection::FragmentSpreadSelection; use crate::operation::inline_fragment_selection::InlineFragmentSelection; + use crate::operation::DirectiveList; use crate::operation::HasSelectionKey; use crate::operation::Selection; use crate::operation::SelectionKey; @@ -434,7 +530,7 @@ mod selection_map { } } - pub(super) fn get_directives_mut(&mut self) -> &mut Arc { + pub(super) fn get_directives_mut(&mut self) -> &mut DirectiveList { match self { Self::Field(field) => field.get_directives_mut(), Self::FragmentSpread(spread) => spread.get_directives_mut(), @@ -467,7 +563,7 @@ mod selection_map { Arc::make_mut(self.0).field.sibling_typename_mut() } - pub(super) fn get_directives_mut(&mut self) -> &mut Arc { + pub(super) fn get_directives_mut(&mut self) -> &mut DirectiveList { Arc::make_mut(self.0).field.directives_mut() } @@ -484,7 +580,7 @@ mod selection_map { Self(fragment_spread_selection) } - pub(super) fn get_directives_mut(&mut self) -> &mut Arc { + pub(super) fn get_directives_mut(&mut self) -> &mut DirectiveList { 
Arc::make_mut(self.0).spread.directives_mut() } @@ -509,7 +605,7 @@ mod selection_map { self.0 } - pub(super) fn get_directives_mut(&mut self) -> &mut Arc { + pub(super) fn get_directives_mut(&mut self) -> &mut DirectiveList { Arc::make_mut(self.0).inline_fragment.directives_mut() } @@ -617,21 +713,21 @@ pub(crate) enum SelectionKey { response_name: Name, /// directives applied on the field #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - directives: Arc, + directives: DirectiveList, }, FragmentSpread { /// The name of the fragment. fragment_name: Name, /// Directives applied on the fragment spread (does not contain @defer). #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - directives: Arc, + directives: DirectiveList, }, InlineFragment { /// The optional type condition of the fragment. type_condition: Option, /// Directives applied on the fragment spread (does not contain @defer). #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - directives: Arc, + directives: DirectiveList, }, Defer { /// Unique selection ID used to distinguish deferred fragment spreads that cannot be merged. 
@@ -746,7 +842,7 @@ impl Selection { } } - fn directives(&self) -> &Arc { + fn directives(&self) -> &DirectiveList { match self { Selection::Field(field_selection) => &field_selection.field.directives, Selection::FragmentSpread(fragment_spread_selection) => { @@ -876,7 +972,7 @@ impl Selection { pub(crate) fn with_updated_directives( &self, - directives: executable::DirectiveList, + directives: impl Into, ) -> Result { match self { Selection::Field(field) => Ok(Selection::Field(Arc::new( @@ -967,7 +1063,7 @@ pub(crate) struct Fragment { pub(crate) schema: ValidFederationSchema, pub(crate) name: Name, pub(crate) type_condition_position: CompositeTypeDefinitionPosition, - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, pub(crate) selection_set: SelectionSet, } @@ -983,7 +1079,7 @@ impl Fragment { type_condition_position: schema .get_type(fragment.type_condition().clone())? .try_into()?, - directives: Arc::new(fragment.directives.clone()), + directives: fragment.directives.clone().into(), selection_set: SelectionSet::from_selection_set( &fragment.selection_set, named_fragments, @@ -1001,17 +1097,14 @@ mod field_selection { use std::hash::Hash; use std::hash::Hasher; use std::ops::Deref; - use std::sync::Arc; use apollo_compiler::ast; - use apollo_compiler::executable; use apollo_compiler::Name; - use apollo_compiler::Node; use serde::Serialize; use crate::error::FederationError; - use crate::operation::sort_arguments; - use crate::operation::sort_directives; + use crate::operation::ArgumentList; + use crate::operation::DirectiveList; use crate::operation::HasSelectionKey; use crate::operation::SelectionKey; use crate::operation::SelectionSet; @@ -1054,10 +1147,7 @@ mod field_selection { } } - pub(crate) fn with_updated_directives( - &self, - directives: executable::DirectiveList, - ) -> Self { + pub(crate) fn with_updated_directives(&self, directives: impl Into) -> Self { Self { field: self.field.with_updated_directives(directives), selection_set: 
self.selection_set.clone(), @@ -1081,8 +1171,6 @@ mod field_selection { pub(crate) struct Field { data: FieldData, key: SelectionKey, - #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")] - sorted_arguments: Arc>>, } impl std::fmt::Debug for Field { @@ -1095,7 +1183,7 @@ mod field_selection { fn eq(&self, other: &Self) -> bool { self.data.field_position.field_name() == other.data.field_position.field_name() && self.key == other.key - && self.sorted_arguments == other.sorted_arguments + && self.data.arguments == other.data.arguments } } @@ -1105,7 +1193,7 @@ mod field_selection { fn hash(&self, state: &mut H) { self.data.field_position.field_name().hash(state); self.key.hash(state); - self.sorted_arguments.hash(state); + self.data.arguments.hash(state); } } @@ -1119,11 +1207,8 @@ mod field_selection { impl Field { pub(crate) fn new(data: FieldData) -> Self { - let mut arguments = data.arguments.as_ref().clone(); - sort_arguments(&mut arguments); Self { key: data.key(), - sorted_arguments: Arc::new(arguments), data, } } @@ -1203,7 +1288,7 @@ mod field_selection { &self.data } - pub(super) fn directives_mut(&mut self) -> &mut Arc { + pub(super) fn directives_mut(&mut self) -> &mut DirectiveList { &mut self.data.directives } @@ -1217,10 +1302,10 @@ mod field_selection { pub(crate) fn with_updated_directives( &self, - directives: executable::DirectiveList, + directives: impl Into, ) -> Field { let mut data = self.data.clone(); - data.directives = Arc::new(directives); + data.directives = directives.into(); Self::new(data) } @@ -1260,9 +1345,9 @@ mod field_selection { pub(crate) field_position: FieldDefinitionPosition, pub(crate) alias: Option, #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")] - pub(crate) arguments: Arc>>, + pub(crate) arguments: ArgumentList, #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, pub(crate) 
sibling_typename: Option, } @@ -1311,11 +1396,9 @@ mod field_selection { impl HasSelectionKey for FieldData { fn key(&self) -> SelectionKey { - let mut directives = self.directives.as_ref().clone(); - sort_directives(&mut directives); SelectionKey::Field { response_name: self.response_name(), - directives: Arc::new(directives), + directives: self.directives.clone(), } } } @@ -1328,14 +1411,12 @@ pub(crate) use field_selection::SiblingTypename; mod fragment_spread_selection { use std::ops::Deref; - use std::sync::Arc; - use apollo_compiler::executable; use apollo_compiler::Name; use serde::Serialize; use crate::operation::is_deferred_selection; - use crate::operation::sort_directives; + use crate::operation::DirectiveList; use crate::operation::HasSelectionKey; use crate::operation::SelectionId; use crate::operation::SelectionKey; @@ -1398,7 +1479,7 @@ mod fragment_spread_selection { &self.data } - pub(super) fn directives_mut(&mut self) -> &mut Arc { + pub(super) fn directives_mut(&mut self) -> &mut DirectiveList { &mut self.data.directives } } @@ -1417,14 +1498,14 @@ mod fragment_spread_selection { pub(crate) type_condition_position: CompositeTypeDefinitionPosition, // directives applied on the fragment spread selection #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, // directives applied within the fragment definition // // PORT_NOTE: The JS codebase combined the fragment spread's directives with the fragment // definition's directives. This was invalid GraphQL as those directives may not be applicable // on different locations. While we now keep track of those references, they are currently ignored. 
#[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - pub(crate) fragment_directives: Arc, + pub(crate) fragment_directives: DirectiveList, #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] pub(crate) selection_id: SelectionId, } @@ -1436,11 +1517,9 @@ mod fragment_spread_selection { deferred_id: self.selection_id.clone(), } } else { - let mut directives = self.directives.as_ref().clone(); - sort_directives(&mut directives); SelectionKey::FragmentSpread { fragment_name: self.fragment_name.clone(), - directives: Arc::new(directives), + directives: self.directives.clone(), } } } @@ -1538,7 +1617,7 @@ impl FragmentSpreadData { schema: fragment.schema.clone(), fragment_name: fragment.name.clone(), type_condition_position: fragment.type_condition_position.clone(), - directives: Arc::new(spread_directives.clone()), + directives: spread_directives.clone().into(), fragment_directives: fragment.directives.clone(), selection_id: SelectionId::new(), } @@ -1549,16 +1628,14 @@ mod inline_fragment_selection { use std::hash::Hash; use std::hash::Hasher; use std::ops::Deref; - use std::sync::Arc; - use apollo_compiler::executable; use serde::Serialize; use crate::error::FederationError; use crate::link::graphql_definition::defer_directive_arguments; use crate::link::graphql_definition::DeferDirectiveArguments; use crate::operation::is_deferred_selection; - use crate::operation::sort_directives; + use crate::operation::DirectiveList; use crate::operation::HasSelectionKey; use crate::operation::SelectionId; use crate::operation::SelectionKey; @@ -1589,10 +1666,7 @@ mod inline_fragment_selection { } } - pub(crate) fn with_updated_directives( - &self, - directives: executable::DirectiveList, - ) -> Self { + pub(crate) fn with_updated_directives(&self, directives: impl Into) -> Self { Self { inline_fragment: self.inline_fragment.with_updated_directives(directives), selection_set: self.selection_set.clone(), @@ -1658,7 +1732,7 @@ mod 
inline_fragment_selection { &self.data } - pub(super) fn directives_mut(&mut self) -> &mut Arc { + pub(super) fn directives_mut(&mut self) -> &mut DirectiveList { &mut self.data.directives } @@ -1672,10 +1746,10 @@ mod inline_fragment_selection { } pub(crate) fn with_updated_directives( &self, - directives: executable::DirectiveList, + directives: impl Into, ) -> InlineFragment { let mut data = self.data().clone(); - data.directives = Arc::new(directives); + data.directives = directives.into(); Self::new(data) } @@ -1701,7 +1775,7 @@ mod inline_fragment_selection { pub(crate) parent_type_position: CompositeTypeDefinitionPosition, pub(crate) type_condition_position: Option, #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] pub(crate) selection_id: SelectionId, } @@ -1731,14 +1805,12 @@ mod inline_fragment_selection { deferred_id: self.selection_id.clone(), } } else { - let mut directives = self.directives.as_ref().clone(); - sort_directives(&mut directives); SelectionKey::InlineFragment { type_condition: self .type_condition_position .as_ref() .map(|pos| pos.type_name().clone()), - directives: Arc::new(directives), + directives: self.directives.clone(), } } } @@ -2438,6 +2510,8 @@ impl SelectionSet { ) -> Result { let mut selection_map = SelectionMap::new(); if let Some(parent) = parent_type_if_abstract { + // XXX(@goto-bus-stop): if the selection set has an *alias* named __typename for some + // other field, this doesn't work right. is that allowed? if !self.has_top_level_typename_field() { let typename_selection = Selection::from_field( Field::new_introspection_typename(&self.schema, &parent.into(), None), @@ -2472,16 +2546,12 @@ impl SelectionSet { } fn has_top_level_typename_field(&self) -> bool { - // Needs to be behind a OnceLock because `Arc::new` is non-const. 
- // XXX(@goto-bus-stop): Note this does *not* count `__typename @include(if: true)`. - // This seems wrong? But it's what JS does, too. - static TYPENAME_KEY: OnceLock = OnceLock::new(); - let key = TYPENAME_KEY.get_or_init(|| SelectionKey::Field { + const TYPENAME_KEY: SelectionKey = SelectionKey::Field { response_name: TYPENAME_FIELD, - directives: Arc::new(Default::default()), - }); + directives: DirectiveList::new(), + }; - self.selections.contains_key(key) + self.selections.contains_key(&TYPENAME_KEY) } /// Adds a path, and optional some selections following that path, to this selection map. @@ -2595,7 +2665,9 @@ impl SelectionSet { /// Removes the @defer directive from all selections without removing that selection. fn without_defer(&mut self) { for (_key, mut selection) in Arc::make_mut(&mut self.selections).iter_mut() { - Arc::make_mut(selection.get_directives_mut()).retain(|dir| dir.name != name!("defer")); + // TODO(@goto-bus-stop): doing this changes the key of the selection! + // We have to rebuild the selection map. 
+ selection.get_directives_mut().remove_one("defer"); if let Some(set) = selection.get_selection_set_mut() { set.without_defer(); } @@ -3121,8 +3193,8 @@ impl FieldSelection { schema: schema.clone(), field_position, alias: field.alias.clone(), - arguments: Arc::new(field.arguments.clone()), - directives: Arc::new(field.directives.clone()), + arguments: field.arguments.clone().into(), + directives: field.directives.clone().into(), sibling_typename: None, }), selection_set: if is_composite { @@ -3237,7 +3309,7 @@ impl InlineFragmentSelection { schema: schema.clone(), parent_type_position: parent_type_position.clone(), type_condition_position, - directives: Arc::new(inline_fragment.directives.clone()), + directives: inline_fragment.directives.clone().into(), selection_id: SelectionId::new(), }); Ok(InlineFragmentSelection::new( @@ -3276,7 +3348,7 @@ impl InlineFragmentSelection { pub(crate) fn from_selection_set( parent_type_position: CompositeTypeDefinitionPosition, selection_set: SelectionSet, - directives: Arc, + directives: DirectiveList, ) -> Self { let inline_fragment_data = InlineFragmentData { schema: selection_set.schema.clone(), @@ -3690,7 +3762,7 @@ impl TryFrom<&Operation> for executable::Operation { operation_type, name: normalized_operation.name.clone(), variables: normalized_operation.variables.deref().clone(), - directives: normalized_operation.directives.deref().clone(), + directives: normalized_operation.directives.iter().cloned().collect(), selection_set: (&normalized_operation.selection_set).try_into()?, }) } @@ -3702,7 +3774,7 @@ impl TryFrom<&Fragment> for executable::Fragment { fn try_from(normalized_fragment: &Fragment) -> Result { Ok(Self { name: normalized_fragment.name.clone(), - directives: normalized_fragment.directives.deref().clone(), + directives: normalized_fragment.directives.iter().cloned().collect(), selection_set: (&normalized_fragment.selection_set).try_into()?, }) } @@ -3773,7 +3845,7 @@ impl TryFrom<&Field> for executable::Field 
{ alias: normalized_field.alias.to_owned(), name: normalized_field.name().to_owned(), arguments: normalized_field.arguments.deref().to_owned(), - directives: normalized_field.directives.deref().to_owned(), + directives: normalized_field.directives.iter().cloned().collect(), selection_set, }) } @@ -3807,7 +3879,11 @@ impl TryFrom<&InlineFragment> for executable::InlineFragment { }); Ok(Self { type_condition, - directives: normalized_inline_fragment.directives.deref().to_owned(), + directives: normalized_inline_fragment + .directives + .iter() + .cloned() + .collect(), selection_set: executable::SelectionSet { ty, selections: Vec::new(), @@ -3832,7 +3908,11 @@ impl From<&FragmentSpreadSelection> for executable::FragmentSpread { let normalized_fragment_spread = &val.spread; Self { fragment_name: normalized_fragment_spread.fragment_name.to_owned(), - directives: normalized_fragment_spread.directives.deref().to_owned(), + directives: normalized_fragment_spread + .directives + .iter() + .cloned() + .collect(), } } } @@ -4016,7 +4096,7 @@ pub(crate) fn normalize_operation( root_kind: operation.operation_type.into(), name: operation.name.clone(), variables: Arc::new(operation.variables.clone()), - directives: Arc::new(operation.directives.clone()), + directives: operation.directives.clone().into(), selection_set: normalized_selection_set, named_fragments, }; diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index 68f8178783..2bd4262e88 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -46,6 +46,7 @@ use apollo_compiler::Node; use super::Containment; use super::ContainmentOptions; +use super::DirectiveList; use super::Field; use super::FieldSelection; use super::Fragment; @@ -54,7 +55,6 @@ use super::InlineFragmentSelection; use super::NamedFragments; use super::Operation; use super::Selection; -use super::SelectionKey; use super::SelectionMapperReturn; use 
super::SelectionOrSet; use super::SelectionSet; @@ -749,10 +749,10 @@ impl Fragment { return false; } - self.selection_set.selections.iter().any(|(selection_key, _)| { + self.selection_set.selections.iter().any(|(_, selection)| { matches!( - selection_key, - SelectionKey::FragmentSpread {fragment_name, directives: _} if fragment_name == other_fragment_name, + selection, + Selection::FragmentSpread(fragment) if fragment.spread.fragment_name == *other_fragment_name ) }) } @@ -763,7 +763,7 @@ enum FullMatchingFragmentCondition<'a> { ForInlineFragmentSelection { // the type condition and directives on an inline fragment selection. type_condition_position: &'a CompositeTypeDefinitionPosition, - directives: &'a Arc, + directives: &'a DirectiveList, }, } @@ -3206,8 +3206,8 @@ mod tests { /// #[test] - #[should_panic(expected = "directive cannot be used on FRAGMENT_DEFINITION")] - // TODO: Investigate this restriction on query document in Rust version. + #[should_panic(expected = "directive is not supported for FRAGMENT_DEFINITION")] + // XXX(@goto-bus-stop): this test does not make sense, we should remove this feature fn reuse_fragments_with_same_directive_on_the_fragment() { let schema_doc = r#" type Query { @@ -3506,6 +3506,7 @@ mod tests { use apollo_compiler::name; use super::*; + use crate::operation::SelectionKey; const TEST_SCHEMA_FOR_EMPTY_BRANCH_REMOVAL: &str = r#" type Query { diff --git a/apollo-federation/src/operation/simplify.rs b/apollo-federation/src/operation/simplify.rs index 89fb42f110..8555b241a2 100644 --- a/apollo-federation/src/operation/simplify.rs +++ b/apollo-federation/src/operation/simplify.rs @@ -2,9 +2,9 @@ use std::sync::Arc; use apollo_compiler::executable; use apollo_compiler::name; -use apollo_compiler::Node; use super::runtime_types_intersect; +use super::DirectiveList; use super::Field; use super::FieldData; use super::FieldSelection; @@ -83,22 +83,18 @@ impl FieldSelection { // sub-selection is empty. 
Which suggest something may be wrong with this part of the query // intent, but the query was valid while keeping an empty sub-selection isn't. So in that // case, we just add some "non-included" __typename field just to keep the query valid. - let directives = - executable::DirectiveList(vec![Node::new(executable::Directive { - name: name!("include"), - arguments: vec![Node::new(executable::Argument { - name: name!("if"), - value: Node::new(executable::Value::Boolean(false)), - })], - })]); + let directives = DirectiveList::one(executable::Directive { + name: name!("include"), + arguments: vec![(name!("if"), false).into()], + }); let non_included_typename = Selection::from_field( Field::new(FieldData { schema: schema.clone(), field_position: field_composite_type_position .introspection_typename_field(), alias: None, - arguments: Arc::new(vec![]), - directives: Arc::new(directives), + arguments: Default::default(), + directives, sibling_typename: None, }), None, @@ -224,14 +220,10 @@ impl InlineFragmentSelection { // We should be able to rebase, or there is a bug, so error if that is the case. // If we rebased successfully then we add "non-included" __typename field selection // just to keep the query valid. 
- let directives = - executable::DirectiveList(vec![Node::new(executable::Directive { - name: name!("include"), - arguments: vec![Node::new(executable::Argument { - name: name!("if"), - value: Node::new(executable::Value::Boolean(false)), - })], - })]); + let directives = DirectiveList::one(executable::Directive { + name: name!("include"), + arguments: vec![(name!("if"), false).into()], + }); let parent_typename_field = if let Some(condition) = this_condition { condition.introspection_typename_field() } else { @@ -242,8 +234,8 @@ impl InlineFragmentSelection { schema: schema.clone(), field_position: parent_typename_field, alias: None, - arguments: Arc::new(vec![]), - directives: Arc::new(directives), + arguments: Default::default(), + directives, sibling_typename: None, }), None, diff --git a/apollo-federation/src/operation/tests/mod.rs b/apollo-federation/src/operation/tests/mod.rs index d90760d341..ed69e54d71 100644 --- a/apollo-federation/src/operation/tests/mod.rs +++ b/apollo-federation/src/operation/tests/mod.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use apollo_compiler::collections::IndexSet; use apollo_compiler::name; use apollo_compiler::schema::Schema; @@ -40,27 +38,7 @@ pub(super) fn parse_schema(schema_doc: &str) -> ValidFederationSchema { } pub(super) fn parse_operation(schema: &ValidFederationSchema, query: &str) -> Operation { - let executable_document = apollo_compiler::ExecutableDocument::parse_and_validate( - schema.schema(), - query, - "query.graphql", - ) - .unwrap(); - let operation = executable_document.operations.get(None).unwrap(); - let named_fragments = NamedFragments::new(&executable_document.fragments, schema); - let selection_set = - SelectionSet::from_selection_set(&operation.selection_set, &named_fragments, schema) - .unwrap(); - - Operation { - schema: schema.clone(), - root_kind: operation.operation_type.into(), - name: operation.name.clone(), - variables: Arc::new(operation.variables.clone()), - directives: 
Arc::new(operation.directives.clone()), - selection_set, - named_fragments, - } + Operation::parse(schema.clone(), query, "query.graphql", None).unwrap() } /// Parse and validate the query similarly to `parse_operation`, but does not construct the diff --git a/apollo-federation/src/query_graph/graph_path.rs b/apollo-federation/src/query_graph/graph_path.rs index 3c75c66b78..b9486f8434 100644 --- a/apollo-federation/src/query_graph/graph_path.rs +++ b/apollo-federation/src/query_graph/graph_path.rs @@ -11,7 +11,6 @@ use std::sync::Arc; use apollo_compiler::ast::Value; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; -use apollo_compiler::executable::DirectiveList; use itertools::Itertools; use petgraph::graph::EdgeIndex; use petgraph::graph::NodeIndex; @@ -30,6 +29,7 @@ use crate::link::graphql_definition::BooleanOrVariable; use crate::link::graphql_definition::DeferDirectiveArguments; use crate::link::graphql_definition::OperationConditional; use crate::link::graphql_definition::OperationConditionalKind; +use crate::operation::DirectiveList; use crate::operation::Field; use crate::operation::FieldData; use crate::operation::HasSelectionKey; @@ -310,7 +310,7 @@ impl HasSelectionKey for OpPathElement { } impl OpPathElement { - pub(crate) fn directives(&self) -> &Arc { + pub(crate) fn directives(&self) -> &DirectiveList { match self { OpPathElement::Field(field) => &field.directives, OpPathElement::InlineFragment(inline_fragment) => &inline_fragment.directives, @@ -427,6 +427,7 @@ impl OpPathElement { match self { Self::Field(_) => Some(self.clone()), // unchanged Self::InlineFragment(inline_fragment) => { + // TODO(@goto-bus-stop): is this not exactly the wrong way around? 
let updated_directives: DirectiveList = inline_fragment .directives .get_all("defer") @@ -3677,18 +3678,16 @@ impl OpPath { } pub(crate) fn conditional_directives(&self) -> DirectiveList { - DirectiveList( - self.0 - .iter() - .flat_map(|path_element| { - path_element - .directives() - .iter() - .filter(|d| d.name == "include" || d.name == "skip") - }) - .cloned() - .collect(), - ) + self.0 + .iter() + .flat_map(|path_element| { + path_element + .directives() + .iter() + .filter(|d| d.name == "include" || d.name == "skip") + }) + .cloned() + .collect() } /// Filter any fragment element in the provided path whose type condition does not exist in the provided schema. @@ -3837,7 +3836,6 @@ fn is_useless_followup_element( mod tests { use std::sync::Arc; - use apollo_compiler::executable::DirectiveList; use apollo_compiler::Name; use apollo_compiler::Schema; use petgraph::stable_graph::EdgeIndex; @@ -3850,7 +3848,6 @@ mod tests { use crate::query_graph::graph_path::OpGraphPath; use crate::query_graph::graph_path::OpGraphPathTrigger; use crate::query_graph::graph_path::OpPathElement; - use crate::schema::position::FieldDefinitionPosition; use crate::schema::position::ObjectFieldDefinitionPosition; use crate::schema::ValidFederationSchema; @@ -3881,14 +3878,7 @@ mod tests { type_name: Name::new("T").unwrap(), field_name: Name::new("t").unwrap(), }; - let data = FieldData { - schema: schema.clone(), - field_position: FieldDefinitionPosition::Object(pos), - alias: None, - arguments: Arc::new(Vec::new()), - directives: Arc::new(DirectiveList::new()), - sibling_typename: None, - }; + let data = FieldData::from_position(&schema, pos.into()); let trigger = OpGraphPathTrigger::OpPathElement(OpPathElement::Field(Field::new(data))); let path = path .add( @@ -3906,14 +3896,7 @@ mod tests { type_name: Name::new("ID").unwrap(), field_name: Name::new("id").unwrap(), }; - let data = FieldData { - schema, - field_position: FieldDefinitionPosition::Object(pos), - alias: None, - 
arguments: Arc::new(Vec::new()), - directives: Arc::new(DirectiveList::new()), - sibling_typename: None, - }; + let data = FieldData::from_position(&schema, pos.into()); let trigger = OpGraphPathTrigger::OpPathElement(OpPathElement::Field(Field::new(data))); let path = path .add( diff --git a/apollo-federation/src/query_graph/path_tree.rs b/apollo-federation/src/query_graph/path_tree.rs index 3411458f89..02812159a3 100644 --- a/apollo-federation/src/query_graph/path_tree.rs +++ b/apollo-federation/src/query_graph/path_tree.rs @@ -464,7 +464,6 @@ where mod tests { use std::sync::Arc; - use apollo_compiler::executable::DirectiveList; use apollo_compiler::ExecutableDocument; use petgraph::stable_graph::NodeIndex; use petgraph::visit::EdgeRef; @@ -542,8 +541,8 @@ mod tests { schema: query_graph.schema().unwrap().clone(), field_position: field_def.clone(), alias: None, - arguments: Arc::new(Vec::new()), - directives: Arc::new(DirectiveList::new()), + arguments: Default::default(), + directives: Default::default(), sibling_typename: None, }; let trigger = OpGraphPathTrigger::OpPathElement(OpPathElement::Field(Field::new(data))); diff --git a/apollo-federation/src/query_plan/conditions.rs b/apollo-federation/src/query_plan/conditions.rs index ab84d2d8ca..d4a84b9f49 100644 --- a/apollo-federation/src/query_plan/conditions.rs +++ b/apollo-federation/src/query_plan/conditions.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use apollo_compiler::ast::Directive; use apollo_compiler::collections::IndexMap; -use apollo_compiler::executable::DirectiveList; use apollo_compiler::executable::Value; use apollo_compiler::Name; use apollo_compiler::Node; @@ -10,6 +9,7 @@ use indexmap::map::Entry; use serde::Serialize; use crate::error::FederationError; +use crate::operation::DirectiveList; use crate::operation::Selection; use crate::operation::SelectionMap; use crate::operation::SelectionSet; @@ -93,7 +93,7 @@ impl Conditions { pub(crate) fn from_directives(directives: &DirectiveList) -> Result 
{ let mut variables = IndexMap::default(); - for directive in directives { + for directive in directives.iter_sorted() { let negated = match directive.name.as_str() { "include" => false, "skip" => true, @@ -285,8 +285,8 @@ pub(crate) fn remove_unneeded_top_level_fragment_directives( } // We can skip some of the fragment directives directive. - let final_selection = - inline_fragment.with_updated_directives(DirectiveList(needed_directives)); + let final_selection = inline_fragment + .with_updated_directives(DirectiveList::from_iter(needed_directives)); selection_map.insert(Selection::InlineFragment(Arc::new(final_selection))); } } @@ -308,19 +308,17 @@ fn remove_conditions_of_element( element: OpPathElement, conditions: &VariableConditions, ) -> OpPathElement { - let updated_directives: DirectiveList = DirectiveList( - element - .directives() - .iter() - .filter(|d| { - !matches_condition_for_kind(d, conditions, ConditionKind::Include) - && !matches_condition_for_kind(d, conditions, ConditionKind::Skip) - }) - .cloned() - .collect(), - ); + let updated_directives: DirectiveList = element + .directives() + .iter() + .filter(|d| { + !matches_condition_for_kind(d, conditions, ConditionKind::Include) + && !matches_condition_for_kind(d, conditions, ConditionKind::Skip) + }) + .cloned() + .collect(); - if updated_directives.0.len() == element.directives().len() { + if updated_directives.len() == element.directives().len() { element } else { element.with_updated_directives(updated_directives) diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index d14e57bad1..b1bbab0ebf 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -12,7 +12,6 @@ use apollo_compiler::ast::Type; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; -use 
apollo_compiler::executable::DirectiveList; use apollo_compiler::executable::VariableDefinition; use apollo_compiler::name; use apollo_compiler::schema; @@ -31,7 +30,9 @@ use super::query_planner::SubgraphOperationCompression; use crate::error::FederationError; use crate::error::SingleFederationError; use crate::link::graphql_definition::DeferDirectiveArguments; +use crate::operation::ArgumentList; use crate::operation::ContainmentOptions; +use crate::operation::DirectiveList; use crate::operation::Field; use crate::operation::FieldData; use crate::operation::InlineFragment; @@ -2407,7 +2408,7 @@ impl FetchDependencyGraphNode { query_graph: &QueryGraph, handled_conditions: &Conditions, variable_definitions: &[Node], - operation_directives: &Arc, + operation_directives: &DirectiveList, operation_compression: &mut SubgraphOperationCompression, operation_name: Option, ) -> Result, FederationError> { @@ -2708,7 +2709,7 @@ fn operation_for_entities_fetch( subgraph_schema: &ValidFederationSchema, selection_set: SelectionSet, mut variable_definitions: Vec>, - operation_directives: &Arc, + operation_directives: &DirectiveList, operation_name: &Option, ) -> Result { variable_definitions.insert(0, representations_variable_definition(subgraph_schema)?); @@ -2746,11 +2747,10 @@ fn operation_for_entities_fetch( schema: subgraph_schema.clone(), field_position: entities, alias: None, - arguments: Arc::new(vec![executable::Argument { - name: FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME, - value: executable::Value::Variable(FEDERATION_REPRESENTATIONS_VAR_NAME).into(), - } - .into()]), + arguments: ArgumentList::one(( + FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME, + executable::Value::Variable(FEDERATION_REPRESENTATIONS_VAR_NAME), + )), directives: Default::default(), sibling_typename: None, })), @@ -2775,7 +2775,7 @@ fn operation_for_entities_fetch( root_kind: SchemaRootDefinitionKind::Query, name: operation_name.clone(), variables: Arc::new(variable_definitions), - directives: 
Arc::clone(operation_directives), + directives: operation_directives.clone(), selection_set, named_fragments: Default::default(), }) @@ -2786,7 +2786,7 @@ fn operation_for_query_fetch( root_kind: SchemaRootDefinitionKind, selection_set: SelectionSet, variable_definitions: Vec>, - operation_directives: &Arc, + operation_directives: &DirectiveList, operation_name: &Option, ) -> Result { Ok(Operation { @@ -2794,7 +2794,7 @@ fn operation_for_query_fetch( root_kind, name: operation_name.clone(), variables: Arc::new(variable_definitions), - directives: Arc::clone(operation_directives), + directives: operation_directives.clone(), selection_set, named_fragments: Default::default(), }) @@ -3715,7 +3715,7 @@ fn wrap_selection_with_type_and_conditions( schema: supergraph_schema.clone(), parent_type_position: wrapping_type.clone(), type_condition_position: Some(type_condition.clone()), - directives: Arc::new([directive].into_iter().collect()), + directives: [directive].into_iter().collect(), selection_id: SelectionId::new(), }), acc, diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs index 75f945e2b2..ab126dbcc6 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs @@ -1,13 +1,13 @@ use std::sync::Arc; use apollo_compiler::collections::IndexSet; -use apollo_compiler::executable::DirectiveList; use apollo_compiler::executable::VariableDefinition; use apollo_compiler::Name; use apollo_compiler::Node; use super::query_planner::SubgraphOperationCompression; use crate::error::FederationError; +use crate::operation::DirectiveList; use crate::operation::SelectionSet; use crate::query_graph::QueryGraph; use crate::query_plan::conditions::Conditions; @@ -47,7 +47,7 @@ const PIPELINING_COST: QueryPlanCost = 100.0; pub(crate) struct FetchDependencyGraphToQueryPlanProcessor { 
variable_definitions: Arc>>, - operation_directives: Arc, + operation_directives: DirectiveList, operation_compression: SubgraphOperationCompression, operation_name: Option, assigned_defer_labels: Option>, @@ -245,7 +245,7 @@ fn sequence_cost(values: impl IntoIterator) -> QueryPlanCo impl FetchDependencyGraphToQueryPlanProcessor { pub(crate) fn new( variable_definitions: Arc>>, - operation_directives: Arc, + operation_directives: DirectiveList, operation_compression: SubgraphOperationCompression, operation_name: Option, assigned_defer_labels: Option>, From 32fa87e426a49cb02ed266e05ad742c608457410 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 20 Aug 2024 10:38:29 +0200 Subject: [PATCH 078/108] add metrics tracking the V8 heap usage (#5781) This adds new gauge metrics tracking V8 memory usage: * apollo.router.v8.heap.used * apollo.router.v8.heap.total Co-authored-by: Edward Huang --- .changesets/feat_geal_v8_heap_statistics.md | 7 + .changesets/feat_update_federation.md | 8 + Cargo.lock | 4 +- apollo-router/Cargo.toml | 2 +- .../bridge_query_planner_pool.rs | 154 +++++++++++++++++- apollo-router/tests/integration/redis.rs | 14 +- .../instrumentation/standard-instruments.mdx | 2 + fuzz/Cargo.toml | 2 +- 8 files changed, 179 insertions(+), 14 deletions(-) create mode 100644 .changesets/feat_geal_v8_heap_statistics.md create mode 100644 .changesets/feat_update_federation.md diff --git a/.changesets/feat_geal_v8_heap_statistics.md b/.changesets/feat_geal_v8_heap_statistics.md new file mode 100644 index 0000000000..c091b108a8 --- /dev/null +++ b/.changesets/feat_geal_v8_heap_statistics.md @@ -0,0 +1,7 @@ +### Add V8 heap usage metrics ([PR #5781](https://github.com/apollographql/router/pull/5781)) + +The router supports new gauge metrics for tracking heap memory usage of the V8 Javascript engine: +- `apollo.router.v8.heap.used`: heap memory used by V8, in bytes +- `apollo.router.v8.heap.total`: total heap allocated by V8, in bytes + +By 
[@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5781 \ No newline at end of file diff --git a/.changesets/feat_update_federation.md b/.changesets/feat_update_federation.md new file mode 100644 index 0000000000..b3c0670daa --- /dev/null +++ b/.changesets/feat_update_federation.md @@ -0,0 +1,8 @@ +### Update federation to 2.8.3 ([PR #5781](https://github.com/apollographql/router/pull/5781)) + +> [!IMPORTANT] +> If you have enabled [Distributed query plan caching](https://www.apollographql.com/docs/router/configuration/distributed-caching/#distributed-query-plan-caching), this release changes the hashing algorithm used for the cache keys. On account of this, you should anticipate additional cache regeneration cost when updating between these versions while the new hashing algorithm comes into service. + +This updates the router from federation version 2.8.1 to 2.8.3, with a [fix for fragment generation](https://github.com/apollographql/federation/pull/3043). 
+ +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5781 diff --git a/Cargo.lock b/Cargo.lock index c20249f7e7..11a516f5dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6092,9 +6092,9 @@ dependencies = [ [[package]] name = "router-bridge" -version = "0.5.27+v2.8.1" +version = "0.5.30+v2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "288fa40fc4e0a76fb911410e05d4525e8bf7558622bd02403f89f871c4d0785b" +checksum = "9b2b67ccfc13842df12e473cbb93fe306a8dc3d120cfa2be57e3537c71bf0e63" dependencies = [ "anyhow", "async-channel 1.9.0", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 5716453c60..dbf481bf5a 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -198,7 +198,7 @@ regex = "1.10.5" reqwest.workspace = true # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.5.27+v2.8.1" +router-bridge = "=0.5.30+v2.8.3" rust-embed = { version = "8.4.0", features = ["include-exclude"] } rustls = "0.21.12" diff --git a/apollo-router/src/query_planner/bridge_query_planner_pool.rs b/apollo-router/src/query_planner/bridge_query_planner_pool.rs index a306f19b6b..bb75124df1 100644 --- a/apollo-router/src/query_planner/bridge_query_planner_pool.rs +++ b/apollo-router/src/query_planner/bridge_query_planner_pool.rs @@ -1,5 +1,7 @@ use std::collections::HashMap; use std::num::NonZeroUsize; +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::Instant; @@ -8,6 +10,9 @@ use async_channel::bounded; use async_channel::Sender; use futures::future::BoxFuture; use opentelemetry::metrics::MeterProvider; +use opentelemetry::metrics::ObservableGauge; +use opentelemetry::metrics::Unit; +use opentelemetry_api::metrics::Meter; use router_bridge::planner::Planner; use tokio::sync::oneshot; use tokio::task::JoinSet; @@ -37,6 +42,10 @@ pub(crate) struct BridgeQueryPlannerPool { schema: Arc, 
subgraph_schemas: Arc>>>, _pool_size_gauge: opentelemetry::metrics::ObservableGauge, + v8_heap_used: Arc, + _v8_heap_used_gauge: ObservableGauge, + v8_heap_total: Arc, + _v8_heap_total_gauge: ObservableGauge, } impl BridgeQueryPlannerPool { @@ -93,7 +102,7 @@ impl BridgeQueryPlannerPool { })? .subgraph_schemas(); - let planners = bridge_query_planners + let planners: Vec<_> = bridge_query_planners .iter() .map(|p| p.planner().clone()) .collect(); @@ -119,21 +128,68 @@ impl BridgeQueryPlannerPool { }); } let sender_for_gauge = sender.clone(); - let pool_size_gauge = meter_provider() - .meter("apollo/router") + let meter = meter_provider().meter("apollo/router"); + let pool_size_gauge = meter .u64_observable_gauge("apollo.router.query_planning.queued") + .with_description("Number of queries waiting to be planned") + .with_unit(Unit::new("query")) .with_callback(move |m| m.observe(sender_for_gauge.len() as u64, &[])) .init(); + let (v8_heap_used, _v8_heap_used_gauge) = Self::create_heap_used_gauge(&meter); + let (v8_heap_total, _v8_heap_total_gauge) = Self::create_heap_total_gauge(&meter); + + // initialize v8 metrics + if let Some(bridge_query_planner) = planners.first().cloned() { + Self::get_v8_metrics( + bridge_query_planner, + v8_heap_used.clone(), + v8_heap_total.clone(), + ) + .await; + } + Ok(Self { js_planners: planners, sender, schema, subgraph_schemas, _pool_size_gauge: pool_size_gauge, + v8_heap_used, + _v8_heap_used_gauge, + v8_heap_total, + _v8_heap_total_gauge, }) } + fn create_heap_used_gauge(meter: &Meter) -> (Arc, ObservableGauge) { + let current_heap_used = Arc::new(AtomicU64::new(0)); + let current_heap_used_for_gauge = current_heap_used.clone(); + let heap_used_gauge = meter + .u64_observable_gauge("apollo.router.v8.heap.used") + .with_description("V8 heap used, in bytes") + .with_unit(Unit::new("By")) + .with_callback(move |i| { + i.observe(current_heap_used_for_gauge.load(Ordering::SeqCst), &[]) + }) + .init(); + (current_heap_used, 
heap_used_gauge) + } + + fn create_heap_total_gauge(meter: &Meter) -> (Arc, ObservableGauge) { + let current_heap_total = Arc::new(AtomicU64::new(0)); + let current_heap_total_for_gauge = current_heap_total.clone(); + let heap_total_gauge = meter + .u64_observable_gauge("apollo.router.v8.heap.total") + .with_description("V8 heap total, in bytes") + .with_unit(Unit::new("By")) + .with_callback(move |i| { + i.observe(current_heap_total_for_gauge.load(Ordering::SeqCst), &[]) + }) + .init(); + (current_heap_total, heap_total_gauge) + } + pub(crate) fn planners(&self) -> Vec>> { self.js_planners.clone() } @@ -147,6 +203,18 @@ impl BridgeQueryPlannerPool { ) -> Arc>>> { self.subgraph_schemas.clone() } + + async fn get_v8_metrics( + planner: Arc>, + v8_heap_used: Arc, + v8_heap_total: Arc, + ) { + let metrics = planner.get_heap_statistics().await; + if let Ok(metrics) = metrics { + v8_heap_used.store(metrics.heap_used, Ordering::SeqCst); + v8_heap_total.store(metrics.heap_total, Ordering::SeqCst); + } + } } impl tower::Service for BridgeQueryPlannerPool { @@ -173,6 +241,20 @@ impl tower::Service for BridgeQueryPlannerPool { let (response_sender, response_receiver) = oneshot::channel(); let sender = self.sender.clone(); + let get_metrics_future = + if let Some(bridge_query_planner) = self.js_planners.first().cloned() { + let v8_heap_used = self.v8_heap_used.clone(); + let v8_heap_total = self.v8_heap_total.clone(); + + Some(Self::get_v8_metrics( + bridge_query_planner, + v8_heap_used, + v8_heap_total, + )) + } else { + None + }; + Box::pin(async move { let start = Instant::now(); let _ = sender.send((req, response_sender)).await; @@ -187,7 +269,73 @@ impl tower::Service for BridgeQueryPlannerPool { start.elapsed().as_secs_f64() ); + if let Some(f) = get_metrics_future { + // execute in a separate task to avoid blocking the request + tokio::task::spawn(f); + } + res }) } } + +#[cfg(test)] + +mod tests { + use opentelemetry_sdk::metrics::data::Gauge; + + use super::*; + use 
crate::metrics::FutureMetricsExt; + use crate::spec::Query; + use crate::Context; + + #[tokio::test] + async fn test_v8_metrics() { + let sdl = include_str!("../testdata/supergraph.graphql"); + let config = Arc::default(); + let schema = Arc::new(Schema::parse(sdl, &config).unwrap()); + + async move { + let mut pool = BridgeQueryPlannerPool::new( + schema.clone(), + config.clone(), + NonZeroUsize::new(2).unwrap(), + ) + .await + .unwrap(); + let query = "query { me { name } }".to_string(); + + let doc = Query::parse_document(&query, None, &schema, &config).unwrap(); + let context = Context::new(); + context.extensions().with_lock(|mut lock| lock.insert(doc)); + + pool.call(QueryPlannerRequest::new(query, None, context)) + .await + .unwrap(); + + let metrics = crate::metrics::collect_metrics(); + let heap_used = metrics.find("apollo.router.v8.heap.used").unwrap(); + let heap_total = metrics.find("apollo.router.v8.heap.total").unwrap(); + + println!( + "got heap_used: {:?}, heap_total: {:?}", + heap_used + .data + .as_any() + .downcast_ref::>() + .unwrap() + .data_points[0] + .value, + heap_total + .data + .as_any() + .downcast_ref::>() + .unwrap() + .data_points[0] + .value + ); + } + .with_metrics() + .await; + } +} diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index cb8b79959e..6b0ff6b404 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -26,7 +26,7 @@ async fn query_planner_cache() -> Result<(), BoxError> { // 2. run `docker compose up -d` and connect to the redis container by running `docker-compose exec redis /bin/bash`. // 3. Run the `redis-cli` command from the shell and start the redis `monitor` command. // 4. Run this test and yank the updated cache key from the redis logs. 
- let known_cache_key = "plan:0:v2.8.1:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3106dfc3339d8c3f3020434024bff0f566a8be5995199954db5a7525a7d7e67a"; + let known_cache_key = "plan:0:v2.8.3:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3106dfc3339d8c3f3020434024bff0f566a8be5995199954db5a7525a7d7e67a"; let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); @@ -921,7 +921,7 @@ async fn connection_failure_blocks_startup() { async fn query_planner_redis_update_query_fragments() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_query_fragments.router.yaml"), - "plan:0:v2.8.1:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:9054d19854e1d9e282ac7645c612bc70b8a7143d43b73d44dade4a5ec43938b4", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:9054d19854e1d9e282ac7645c612bc70b8a7143d43b73d44dade4a5ec43938b4", ) .await; } @@ -940,7 +940,7 @@ async fn query_planner_redis_update_planner_mode() { async fn query_planner_redis_update_introspection() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_introspection.router.yaml"), - "plan:0:v2.8.1:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:04b3051125b5994fba6b0a22b2d8b4246cadc145be030c491a3431655d2ba07a", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:04b3051125b5994fba6b0a22b2d8b4246cadc145be030c491a3431655d2ba07a", ) .await; } @@ -949,7 +949,7 @@ 
async fn query_planner_redis_update_introspection() { async fn query_planner_redis_update_defer() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_defer.router.yaml"), - "plan:0:v2.8.1:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3b7241b0db2cd878b79c0810121953ba544543f3cb2692aaf1a59184470747b0", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3b7241b0db2cd878b79c0810121953ba544543f3cb2692aaf1a59184470747b0", ) .await; } @@ -960,7 +960,7 @@ async fn query_planner_redis_update_type_conditional_fetching() { include_str!( "fixtures/query_planner_redis_config_update_type_conditional_fetching.router.yaml" ), - "plan:0:v2.8.1:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0ca695a8c4c448b65fa04229c663f44150af53b184ebdcbb0ad6862290efed76", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0ca695a8c4c448b65fa04229c663f44150af53b184ebdcbb0ad6862290efed76", ) .await; } @@ -971,7 +971,7 @@ async fn query_planner_redis_update_reuse_query_fragments() { include_str!( "fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml" ), - "plan:0:v2.8.1:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:f7c04319556397ec4b550aa5aaa96c73689cee09026b661b6a9fc20b49e6fa77", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:f7c04319556397ec4b550aa5aaa96c73689cee09026b661b6a9fc20b49e6fa77", ) .await; } @@ -994,7 +994,7 @@ async fn test_redis_query_plan_config_update(updated_config: &str, new_cache_key 
router.assert_started().await; router.clear_redis_cache().await; - let starting_key = "plan:0:v2.8.1:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:4a5827854a6d2efc85045f0d5bede402e15958390f1073d2e77df56188338e5a"; + let starting_key = "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:4a5827854a6d2efc85045f0d5bede402e15958390f1073d2e77df56188338e5a"; router.execute_default_query().await; router.assert_redis_cache_contains(starting_key, None).await; router.update_config(updated_config).await; diff --git a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx index 37c63e8b57..d29cbf1fca 100644 --- a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx +++ b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx @@ -66,6 +66,8 @@ The coprocessor operations metric has the following attributes: - `apollo.router.query_planning.plan.duration` - Histogram of plan durations isolated to query planning time only. - `apollo.router.query_planning.total.duration` - Histogram of plan durations including queue time. - `apollo.router.query_planning.queued` - A gauge of the number of queued plans requests. +- `apollo.router.v8.heap.used` - heap memory used by V8, in bytes. +- `apollo.router.v8.heap.total` - total heap allocated by V8, in bytes. 
### Uplink diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index 451ea09375..781b40cc11 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -20,7 +20,7 @@ reqwest = { workspace = true, features = ["json", "blocking"] } serde_json.workspace = true tokio.workspace = true # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.5.27+v2.8.1" +router-bridge = "=0.5.30+v2.8.3" [dev-dependencies] anyhow = "1" From 5981ddbad0d3c14579157e6b52124e297fcd4929 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 20 Aug 2024 11:26:52 +0200 Subject: [PATCH 079/108] small optimizations (#5765) --- apollo-router/src/plugins/csrf.rs | 9 +-- apollo-router/src/plugins/headers.rs | 99 +++++++++++++++++----------- 2 files changed, 66 insertions(+), 42 deletions(-) diff --git a/apollo-router/src/plugins/csrf.rs b/apollo-router/src/plugins/csrf.rs index 6e0f08e118..5c76c57116 100644 --- a/apollo-router/src/plugins/csrf.rs +++ b/apollo-router/src/plugins/csrf.rs @@ -1,5 +1,6 @@ //! Cross Site Request Forgery (CSRF) plugin. use std::ops::ControlFlow; +use std::sync::Arc; use http::header; use http::HeaderMap; @@ -35,14 +36,14 @@ pub(crate) struct CSRFConfig { /// - did not set any `allow_headers` list (so it defaults to `mirror_request`) /// - added your required headers to the allow_headers list, as shown in the /// `examples/cors-and-csrf/custom-headers.router.yaml` files. 
- required_headers: Vec, + required_headers: Arc>, } -fn apollo_custom_preflight_headers() -> Vec { - vec![ +fn apollo_custom_preflight_headers() -> Arc> { + Arc::new(vec![ "x-apollo-operation-name".to_string(), "apollo-require-preflight".to_string(), - ] + ]) } impl Default for CSRFConfig { diff --git a/apollo-router/src/plugins/headers.rs b/apollo-router/src/plugins/headers.rs index 2f19a965ff..1e52cd444c 100644 --- a/apollo-router/src/plugins/headers.rs +++ b/apollo-router/src/plugins/headers.rs @@ -193,6 +193,7 @@ struct Config { struct Headers { all_operations: Arc>, subgraph_operations: HashMap>>, + reserved_headers: Arc>, } #[async_trait::async_trait] @@ -220,6 +221,7 @@ impl Plugin for Headers { Ok(Headers { all_operations: Arc::new(operations), subgraph_operations, + reserved_headers: Arc::new(RESERVED_HEADERS.iter().collect()), }) } @@ -230,6 +232,7 @@ impl Plugin for Headers { .get(name) .cloned() .unwrap_or_else(|| self.all_operations.clone()), + self.reserved_headers.clone(), )) .service(service) .boxed() @@ -242,10 +245,13 @@ struct HeadersLayer { } impl HeadersLayer { - fn new(operations: Arc>) -> Self { + fn new( + operations: Arc>, + reserved_headers: Arc>, + ) -> Self { Self { operations, - reserved_headers: Arc::new(RESERVED_HEADERS.iter().collect()), + reserved_headers, } } } @@ -583,12 +589,13 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert(Insert::Static( - InsertStatic { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Insert(Insert::Static(InsertStatic { name: "c".try_into()?, value: "d".try_into()?, - }, - ))])) + }))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -610,12 +617,15 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert( - Insert::FromContext(InsertFromContext { - name: "header_from_context".try_into()?, 
- from_context: "my_key".to_string(), - }), - )])) + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Insert(Insert::FromContext( + InsertFromContext { + name: "header_from_context".try_into()?, + from_context: "my_key".to_string(), + }, + ))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -637,13 +647,14 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert(Insert::FromBody( - InsertFromBody { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Insert(Insert::FromBody(InsertFromBody { name: "header_from_request".try_into()?, path: JSONQuery::parse(".operationName")?, default: None, - }, - ))])) + }))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -658,9 +669,10 @@ mod test { .withf(|request| request.assert_headers(vec![("ac", "vac"), ("ab", "vab")])) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Remove(Remove::Named( - "aa".try_into()?, - ))])) + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Remove(Remove::Named("aa".try_into()?))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -675,9 +687,12 @@ mod test { .withf(|request| request.assert_headers(vec![("ac", "vac")])) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Remove(Remove::Matching( - Regex::from_str("a[ab]")?, - ))])) + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Remove(Remove::Matching(Regex::from_str( + "a[ab]", + )?))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -701,11 +716,13 @@ mod test { }) .returning(example_response); - let mut service = - 
HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Matching { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Propagate(Propagate::Matching { matching: Regex::from_str("d[ab]")?, - })])) - .layer(mock); + })]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -726,13 +743,15 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Propagate(Propagate::Named { named: "da".try_into()?, rename: None, default: None, - })])) - .layer(mock); + })]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -753,13 +772,15 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Propagate(Propagate::Named { named: "da".try_into()?, rename: Some("ea".try_into()?), default: None, - })])) - .layer(mock); + })]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -780,13 +801,15 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Propagate(Propagate::Named { named: "ea".try_into()?, rename: None, default: Some("defaulted".try_into()?), - })])) - .layer(mock); + })]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) From 4e6773cfdec6fab4fb414b4e837a4c8bca2a6c17 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Tue, 20 Aug 2024 12:55:49 +0200 Subject: [PATCH 080/108] 
=?UTF-8?q?Move=20QP=C2=A0compatibility=20checks?= =?UTF-8?q?=20into=20constructor=20and=20add=20metric=20(#5811)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Iryna Shestak Co-authored-by: Renée --- apollo-federation/src/error/mod.rs | 17 +- .../src/query_plan/query_planner.rs | 92 +++- apollo-federation/src/schema/field_set.rs | 3 +- apollo-router/src/error.rs | 10 +- .../src/query_planner/bridge_query_planner.rs | 99 +++- apollo-router/src/router_factory.rs | 236 --------- .../tests/fixtures/broken-supergraph.graphql | 127 +++++ .../tests/fixtures/valid-supergraph.graphql | 126 +++++ apollo-router/tests/integration/lifecycle.rs | 70 --- apollo-router/tests/integration/mod.rs | 1 + .../tests/integration/query_planner.rs | 466 ++++++++++++++++++ 11 files changed, 922 insertions(+), 325 deletions(-) create mode 100644 apollo-router/tests/fixtures/broken-supergraph.graphql create mode 100644 apollo-router/tests/fixtures/valid-supergraph.graphql create mode 100644 apollo-router/tests/integration/query_planner.rs diff --git a/apollo-federation/src/error/mod.rs b/apollo-federation/src/error/mod.rs index f13ec59757..555d1a4339 100644 --- a/apollo-federation/src/error/mod.rs +++ b/apollo-federation/src/error/mod.rs @@ -29,6 +29,18 @@ impl From for String { } } +#[derive(Clone, Debug, strum_macros::Display, PartialEq, Eq)] +pub enum UnsupportedFeatureKind { + #[strum(to_string = "progressive overrides")] + ProgressiveOverrides, + #[strum(to_string = "defer")] + Defer, + #[strum(to_string = "context")] + Context, + #[strum(to_string = "alias")] + Alias, +} + #[derive(Debug, Clone, thiserror::Error)] pub enum SingleFederationError { #[error( @@ -185,7 +197,10 @@ pub enum SingleFederationError { #[error("{message}")] OverrideOnInterface { message: String }, #[error("{message}")] - UnsupportedFeature { message: String }, + UnsupportedFeature { + message: String, + kind: UnsupportedFeatureKind, + }, 
#[error("{message}")] InvalidFederationSupergraph { message: String }, #[error("{message}")] diff --git a/apollo-federation/src/query_plan/query_planner.rs b/apollo-federation/src/query_plan/query_planner.rs index 84f392340e..5670c685d9 100644 --- a/apollo-federation/src/query_plan/query_planner.rs +++ b/apollo-federation/src/query_plan/query_planner.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; +use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use apollo_compiler::ExecutableDocument; use apollo_compiler::Name; @@ -46,6 +47,10 @@ use crate::utils::logging::snapshot; use crate::ApiSchemaOptions; use crate::Supergraph; +pub(crate) const OVERRIDE_LABEL_ARG_NAME: &str = "overrideLabel"; +pub(crate) const CONTEXT_DIRECTIVE: &str = "context"; +pub(crate) const JOIN_FIELD: &str = "join__field"; + #[derive(Debug, Clone, Hash)] pub struct QueryPlannerConfig { /// Whether the query planner should try to reused the named fragments of the planned query in @@ -208,6 +213,7 @@ impl QueryPlanner { config: QueryPlannerConfig, ) -> Result { config.assert_valid(); + Self::check_unsupported_features(supergraph)?; let supergraph_schema = supergraph.schema.clone(); let api_schema = supergraph.to_api_schema(ApiSchemaOptions { @@ -533,6 +539,89 @@ impl QueryPlanner { pub fn api_schema(&self) -> &ValidFederationSchema { &self.api_schema } + + fn check_unsupported_features(supergraph: &Supergraph) -> Result<(), FederationError> { + // We have a *progressive* override when `join__field` has a + // non-null value for `overrideLabel` field. + // + // This looks at object types' fields and their directive + // applications, looking specifically for `@join__field` + // arguments list. 
+ let has_progressive_overrides = supergraph + .schema + .schema() + .types + .values() + .filter_map(|extended_type| { + // The override label args can be only on ObjectTypes + if let ExtendedType::Object(object_type) = extended_type { + Some(object_type) + } else { + None + } + }) + .flat_map(|object_type| &object_type.fields) + .flat_map(|(_, field)| { + field + .directives + .iter() + .filter(|d| d.name.as_str() == JOIN_FIELD) + }) + .any(|join_directive| { + if let Some(override_label_arg) = + join_directive.argument_by_name(OVERRIDE_LABEL_ARG_NAME) + { + // Any argument value for `overrideLabel` that's not + // null can be considered as progressive override usage + if !override_label_arg.is_null() { + return true; + } + return false; + } + false + }); + if has_progressive_overrides { + let message = "\ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with progressive overrides. \ + Remove uses of progressive overrides to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.\ + "; + return Err(SingleFederationError::UnsupportedFeature { + message: message.to_owned(), + kind: crate::error::UnsupportedFeatureKind::ProgressiveOverrides, + } + .into()); + } + + // We will only check for `@context` direcive, since + // `@fromContext` can only be used if `@context` is already + // applied, and we assume a correctly composed supergraph. + // + // `@context` can only be applied on Object Types, Interface + // Types and Unions. For simplicity of this function, we just + // check all 'extended_type` directives. + let has_set_context = supergraph + .schema + .schema() + .types + .values() + .any(|extended_type| extended_type.directives().has(CONTEXT_DIRECTIVE)); + if has_set_context { + let message = "\ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with `@context`. 
\ + Remove uses of `@context` to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.\ + "; + return Err(SingleFederationError::UnsupportedFeature { + message: message.to_owned(), + kind: crate::error::UnsupportedFeatureKind::Context, + } + .into()); + } + Ok(()) + } } fn compute_root_serial_dependency_graph( @@ -767,8 +856,9 @@ fn compute_plan_for_defer_conditionals( _parameters: &mut QueryPlanningParameters, _defer_conditions: IndexMap>, ) -> Result, FederationError> { - Err(SingleFederationError::Internal { + Err(SingleFederationError::UnsupportedFeature { message: String::from("@defer is currently not supported"), + kind: crate::error::UnsupportedFeatureKind::Defer, } .into()) } diff --git a/apollo-federation/src/schema/field_set.rs b/apollo-federation/src/schema/field_set.rs index 6aee222a35..442162efd9 100644 --- a/apollo-federation/src/schema/field_set.rs +++ b/apollo-federation/src/schema/field_set.rs @@ -41,7 +41,8 @@ fn check_absence_of_aliases(selection_set: &SelectionSet) -> Result<(), Federati errors.push(SingleFederationError::UnsupportedFeature { // PORT_NOTE: The JS version also quotes the directive name in the error message. // For example, "aliases are not currently supported in @requires". 
- message: format!(r#"Cannot use alias "{alias}" in "{}": aliases are not currently supported in the used directive"#, field.field) + message: format!(r#"Cannot use alias "{alias}" in "{}": aliases are not currently supported in the used directive"#, field.field), + kind: crate::error::UnsupportedFeatureKind::Alias }.into()); } if let Some(selection_set) = &field.selection_set { diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index 2ebf66bd4d..80b075a915 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -233,8 +233,8 @@ pub(crate) enum ServiceBuildError { /// couldn't build Query Planner Service: {0} QueryPlannerError(QueryPlannerError), - /// The supergraph schema failed to produce a valid API schema: {0} - ApiSchemaError(FederationError), + /// failed to initialize the query planner: {0} + QpInitError(FederationError), /// schema error: {0} Schema(SchemaError), @@ -249,12 +249,6 @@ impl From for ServiceBuildError { } } -impl From for ServiceBuildError { - fn from(err: FederationError) -> Self { - ServiceBuildError::ApiSchemaError(err) - } -} - impl From> for ServiceBuildError { fn from(errors: Vec) -> Self { ServiceBuildError::QueryPlannerError(errors.into()) diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index cb6335afb0..fc4ccce41d 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -10,6 +10,7 @@ use apollo_compiler::ast; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_federation::error::FederationError; +use apollo_federation::error::SingleFederationError; use apollo_federation::query_plan::query_planner::QueryPlanner; use futures::future::BoxFuture; use opentelemetry_api::metrics::MeterProvider as _; @@ -64,6 +65,10 @@ use crate::Configuration; pub(crate) const RUST_QP_MODE: &str = "rust"; const JS_QP_MODE: &str = "js"; 
+const UNSUPPORTED_CONTEXT: &str = "context"; +const UNSUPPORTED_OVERRIDES: &str = "overrides"; +const UNSUPPORTED_FED1: &str = "fed1"; +const INTERNAL_INIT_ERROR: &str = "internal"; #[derive(Clone)] /// A query planner that calls out to the nodejs router-bridge query planner. @@ -162,10 +167,7 @@ impl PlannerMode { QueryPlannerMode::BothBestEffort => match Self::rust(schema, configuration) { Ok(planner) => Ok(Some(planner)), Err(error) => { - tracing::warn!( - "Failed to initialize the new query planner, \ - falling back to legacy: {error}" - ); + tracing::info!("Falling back to the legacy query planner: {error}"); Ok(None) } }, @@ -189,10 +191,34 @@ impl PlannerMode { }, debug: Default::default(), }; - Ok(Arc::new(QueryPlanner::new( - schema.federation_supergraph(), - config, - )?)) + let result = QueryPlanner::new(schema.federation_supergraph(), config); + + match &result { + Err(FederationError::SingleFederationError { + inner: error, + trace: _, + }) => match error { + SingleFederationError::UnsupportedFederationVersion { .. 
} => { + metric_rust_qp_init(Some(UNSUPPORTED_FED1)); + } + SingleFederationError::UnsupportedFeature { message: _, kind } => match kind { + apollo_federation::error::UnsupportedFeatureKind::ProgressiveOverrides => { + metric_rust_qp_init(Some(UNSUPPORTED_OVERRIDES)) + } + apollo_federation::error::UnsupportedFeatureKind::Context => { + metric_rust_qp_init(Some(UNSUPPORTED_CONTEXT)) + } + _ => metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)), + }, + _ => { + metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)); + } + }, + Err(_) => metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)), + Ok(_) => metric_rust_qp_init(None), + } + + Ok(Arc::new(result.map_err(ServiceBuildError::QpInitError)?)) } async fn js( @@ -975,6 +1001,25 @@ pub(crate) fn metric_query_planning_plan_duration(planner: &'static str, start: ); } +pub(crate) fn metric_rust_qp_init(init_error_kind: Option<&'static str>) { + if let Some(init_error_kind) = init_error_kind { + u64_counter!( + "apollo.router.lifecycle.query_planner.init", + "Rust query planner initialization", + 1, + "init.error_kind" = init_error_kind, + "init.is_success" = false + ); + } else { + u64_counter!( + "apollo.router.lifecycle.query_planner.init", + "Rust query planner initialization", + 1, + "init.is_success" = true + ); + } +} + #[cfg(test)] mod tests { use std::fs; @@ -1617,4 +1662,42 @@ mod tests { "planner" = "js" ); } + + #[test] + fn test_metric_rust_qp_initialization() { + metric_rust_qp_init(None); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.is_success" = true + ); + metric_rust_qp_init(Some(UNSUPPORTED_CONTEXT)); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.error_kind" = "context", + "init.is_success" = false + ); + metric_rust_qp_init(Some(UNSUPPORTED_OVERRIDES)); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.error_kind" = "overrides", + "init.is_success" = false + ); + metric_rust_qp_init(Some(UNSUPPORTED_FED1)); + 
assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.error_kind" = "fed1", + "init.is_success" = false + ); + metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.error_kind" = "internal", + "init.is_success" = false + ); + } } diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index fa2d8aab66..e110726712 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use std::io; use std::sync::Arc; -use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use axum::response::IntoResponse; use http::StatusCode; @@ -52,9 +51,6 @@ use crate::spec::Schema; use crate::ListenAddr; pub(crate) const STARTING_SPAN_NAME: &str = "starting"; -pub(crate) const OVERRIDE_LABEL_ARG_NAME: &str = "overrideLabel"; -pub(crate) const CONTEXT_DIRECTIVE: &str = "context"; -pub(crate) const JOIN_FIELD: &str = "join__field"; #[derive(Clone)] /// A path and a handler to be exposed as a web_endpoint for plugins @@ -238,13 +234,6 @@ impl YamlRouterFactory { ) .await?; - // Don't let the router start in experimental_query_planner_mode and - // unimplemented Rust QP features. - can_use_with_experimental_query_planner( - configuration.clone(), - supergraph_creator.schema(), - )?; - // Instantiate the parser here so we can use it to warm up the planner below let query_analysis_layer = QueryAnalysisLayer::new(supergraph_creator.schema(), Arc::clone(&configuration)).await; @@ -770,108 +759,6 @@ fn inject_schema_id(schema_id: Option<&str>, configuration: &mut Value) { } } -// The Rust QP has not yet implemented setContext -// (`@context` directives), progressive overrides, and it -// doesn't support fed v1 *supergraphs*. -// -// If users are using the Rust QP as standalone (`new`) or in comparison mode (`both`), -// fail to start up the router emitting an error. 
-fn can_use_with_experimental_query_planner( - configuration: Arc, - schema: Arc, -) -> Result<(), ConfigurationError> { - match configuration.experimental_query_planner_mode { - crate::configuration::QueryPlannerMode::New - | crate::configuration::QueryPlannerMode::Both => { - // We have a *progressive* override when `join__directive` has a - // non-null value for `overrideLabel` field. - // - // This looks at object types' fields and their directive - // applications, looking specifically for `@join__direcitve` - // arguments list. - let has_progressive_overrides = schema - .supergraph_schema() - .types - .values() - .filter_map(|extended_type| { - // The override label args can be only on ObjectTypes - if let ExtendedType::Object(object_type) = extended_type { - Some(object_type) - } else { - None - } - }) - .flat_map(|object_type| &object_type.fields) - .filter_map(|(_, field)| { - let join_field_directives = field - .directives - .iter() - .filter(|d| d.name.as_str() == JOIN_FIELD) - .collect::>(); - if !join_field_directives.is_empty() { - Some(join_field_directives) - } else { - None - } - }) - .flatten() - .any(|join_directive| { - if let Some(override_label_arg) = - join_directive.argument_by_name(OVERRIDE_LABEL_ARG_NAME) - { - // Any argument value for `overrideLabel` that's not - // null can be considered as progressive override usage - if !override_label_arg.is_null() { - return true; - } - return false; - } - false - }); - if has_progressive_overrides { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_query_planner_mode` cannot be used with progressive overrides", - error: "remove uses of progressive overrides to try the experimental_query_planner_mode in `both` or `new`, otherwise switch back to `legacy`.".to_string(), - }); - } - - // We will only check for `@context` direcive, since - // `@fromContext` can only be used if `@context` is already - // applied, and we assume a correctly composed supergraph. 
- // - // `@context` can only be applied on Object Types, Interface - // Types and Unions. For simplicity of this function, we just - // check all 'extended_type` directives. - let has_set_context = schema - .supergraph_schema() - .types - .values() - .any(|extended_type| extended_type.directives().has(CONTEXT_DIRECTIVE)); - if has_set_context { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_query_planner_mode` cannot be used with `@context`", - error: "remove uses of `@context` to try the experimental_query_planner_mode in `both` or `new`, otherwise switch back to `legacy`.".to_string(), - }); - } - - // Fed1 supergraphs will not work with the rust query planner. - let is_fed1_supergraph = match schema.federation_version() { - Some(v) => v == 1, - None => false, - }; - if is_fed1_supergraph { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_query_planner_mode` cannot be used with fed1 supergraph", - error: "switch back to `experimental_query_planner_mode: legacy` to use the router with fed1 supergraph".to_string(), - }); - } - - Ok(()) - } - crate::configuration::QueryPlannerMode::Legacy - | crate::configuration::QueryPlannerMode::BothBestEffort => Ok(()), - } -} #[cfg(test)] mod test { use std::sync::Arc; @@ -882,11 +769,9 @@ mod test { use tower_http::BoxError; use crate::configuration::Configuration; - use crate::configuration::QueryPlannerMode; use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::register_plugin; - use crate::router_factory::can_use_with_experimental_query_planner; use crate::router_factory::inject_schema_id; use crate::router_factory::RouterSuperServiceFactory; use crate::router_factory::YamlRouterFactory; @@ -1019,125 +904,4 @@ mod test { "8e2021d131b23684671c3b85f82dfca836908c6a541bbd5c3772c66e7f8429d8" ); } - - #[test] - fn test_cannot_use_context_with_experimental_query_planner() { - let config = Configuration { - experimental_query_planner_mode: 
QueryPlannerMode::Both, - ..Default::default() - }; - let schema = include_str!("testdata/supergraph_with_context.graphql"); - let schema = Arc::new(Schema::parse(schema, &config).unwrap()); - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: both cannot be used with @context" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::New, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: new cannot be used with @context" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Legacy, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: legacy should be able to be used with @context" - ); - } - - #[test] - fn test_cannot_use_progressive_overrides_with_experimental_query_planner() { - // PROGRESSIVE OVERRIDES - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Both, - ..Default::default() - }; - let schema = include_str!("testdata/supergraph_with_override_label.graphql"); - let schema = Arc::new(Schema::parse(schema, &config).unwrap()); - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: both cannot be used with progressive overrides" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::New, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: new cannot be used with progressive overrides" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Legacy, - ..Default::default() - }; - assert!( - 
can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: legacy should be able to be used with progressive overrides" - ); - } - - #[test] - fn test_cannot_use_fed1_supergraphs_with_experimental_query_planner() { - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Both, - ..Default::default() - }; - let schema = include_str!("testdata/supergraph.graphql"); - let schema = Arc::new(Schema::parse(schema, &config).unwrap()); - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: both cannot be used with fed1 supergraph" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::New, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: new cannot be used with fed1 supergraph" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Legacy, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: legacy should be able to be used with fed1 supergraph" - ); - } - - #[test] - fn test_can_use_fed2_supergraphs_with_experimental_query_planner() { - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Both, - ..Default::default() - }; - let schema = include_str!("testdata/minimal_fed2_supergraph.graphql"); - let schema = Arc::new(Schema::parse(schema, &config).unwrap()); - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: both can be used" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::New, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), 
schema.clone()).is_ok(), - "experimental_query_planner_mode: new can be used" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Legacy, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: legacy can be used" - ); - } } diff --git a/apollo-router/tests/fixtures/broken-supergraph.graphql b/apollo-router/tests/fixtures/broken-supergraph.graphql new file mode 100644 index 0000000000..eafc474b2b --- /dev/null +++ b/apollo-router/tests/fixtures/broken-supergraph.graphql @@ -0,0 +1,127 @@ +schema + # this is missing a link directive spec definition + # @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { + query: Query + mutation: Mutation +} + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @join__field( + graph: join__Graph! + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__type( + graph: join__Graph! + key: join__FieldSet +) repeatable on OBJECT | INTERFACE + +directive @join__owner(graph: join__Graph!) on OBJECT | INTERFACE + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @tag( + name: String! 
+) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION + +directive @inaccessible on OBJECT | FIELD_DEFINITION | INTERFACE | UNION + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar join__FieldSet + +scalar federation__Scope + +enum join__Graph { + ACCOUNTS + @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY + @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS + @join__graph(name: "products", url: "https://products.demo.starstuff.dev") + REVIEWS + @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") +} +type Mutation @join__type(graph: PRODUCTS) @join__type(graph: REVIEWS) { + createProduct(name: String, upc: ID!): Product @join__field(graph: PRODUCTS) + createReview(body: String, id: ID!, upc: ID!): Review + @join__field(graph: REVIEWS) +} + +type Product + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: REVIEWS, key: "upc") { + inStock: Boolean + @join__field(graph: INVENTORY) + @tag(name: "private") + @inaccessible + name: String @join__field(graph: PRODUCTS) + price: Int @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) + upc: String! 
+ @join__field(graph: PRODUCTS) + @join__field(graph: INVENTORY, external: true) + @join__field(graph: REVIEWS, external: true) + weight: Int @join__field(graph: PRODUCTS) +} + +type Query @join__type(graph: ACCOUNTS) @join__type(graph: PRODUCTS) { + me: User @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__owner(graph: REVIEWS) + @join__type(graph: REVIEWS, key: "id") { + author: User @join__field(graph: REVIEWS) + body: String @join__field(graph: REVIEWS) + id: ID! + product: Product @join__field(graph: REVIEWS) +} + +type User + @join__owner(graph: ACCOUNTS) + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") { + id: ID! + name: String @join__field(graph: ACCOUNTS) + + reviews: [Review] @join__field(graph: REVIEWS) + username: String @join__field(graph: ACCOUNTS) +} diff --git a/apollo-router/tests/fixtures/valid-supergraph.graphql b/apollo-router/tests/fixtures/valid-supergraph.graphql new file mode 100644 index 0000000000..fe43cc6964 --- /dev/null +++ b/apollo-router/tests/fixtures/valid-supergraph.graphql @@ -0,0 +1,126 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { + query: Query + mutation: Mutation +} + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @join__field( + graph: join__Graph! + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__type( + graph: join__Graph! + key: join__FieldSet +) repeatable on OBJECT | INTERFACE + +directive @join__owner(graph: join__Graph!) on OBJECT | INTERFACE + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! 
+) repeatable on OBJECT | INTERFACE + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @tag( + name: String! +) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION + +directive @inaccessible on OBJECT | FIELD_DEFINITION | INTERFACE | UNION + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar join__FieldSet + +scalar federation__Scope + +enum join__Graph { + ACCOUNTS + @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY + @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS + @join__graph(name: "products", url: "https://products.demo.starstuff.dev") + REVIEWS + @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") +} +type Mutation @join__type(graph: PRODUCTS) @join__type(graph: REVIEWS) { + createProduct(name: String, upc: ID!): Product @join__field(graph: PRODUCTS) + createReview(body: String, id: ID!, upc: ID!): Review + @join__field(graph: REVIEWS) +} + +type Product + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: REVIEWS, key: "upc") { + inStock: Boolean + @join__field(graph: INVENTORY) + @tag(name: "private") + @inaccessible + name: String @join__field(graph: PRODUCTS) + price: Int @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) + upc: String! 
+ @join__field(graph: PRODUCTS) + @join__field(graph: INVENTORY, external: true) + @join__field(graph: REVIEWS, external: true) + weight: Int @join__field(graph: PRODUCTS) +} + +type Query @join__type(graph: ACCOUNTS) @join__type(graph: PRODUCTS) { + me: User @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__owner(graph: REVIEWS) + @join__type(graph: REVIEWS, key: "id") { + author: User @join__field(graph: REVIEWS) + body: String @join__field(graph: REVIEWS) + id: ID! + product: Product @join__field(graph: REVIEWS) +} + +type User + @join__owner(graph: ACCOUNTS) + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") { + id: ID! + name: String @join__field(graph: ACCOUNTS) + + reviews: [Review] @join__field(graph: REVIEWS) + username: String @join__field(graph: ACCOUNTS) +} diff --git a/apollo-router/tests/integration/lifecycle.rs b/apollo-router/tests/integration/lifecycle.rs index 2f7feea952..71af2dbcf8 100644 --- a/apollo-router/tests/integration/lifecycle.rs +++ b/apollo-router/tests/integration/lifecycle.rs @@ -460,73 +460,3 @@ fn test_plugin_ordering_push_trace(context: &Context, entry: String) { ) .unwrap(); } - -#[tokio::test(flavor = "multi_thread")] -async fn fed1_schema_with_legacy_qp() { - let mut router = IntegrationTest::builder() - .config("experimental_query_planner_mode: legacy") - .supergraph("../examples/graphql/local.graphql") - .build() - .await; - router.start().await; - router.assert_started().await; - router.execute_default_query().await; - router.graceful_shutdown().await; -} - -#[tokio::test(flavor = "multi_thread")] -async fn fed1_schema_with_new_qp() { - let mut router = IntegrationTest::builder() - .config("experimental_query_planner_mode: new") - .supergraph("../examples/graphql/local.graphql") - .build() - .await; - router.start().await; - router - .assert_log_contains( - "could not create router: \ - The supergraph schema failed 
to produce a valid API schema: \ - Supergraphs composed with federation version 1 are not supported.", - ) - .await; - router.assert_shutdown().await; -} - -#[tokio::test(flavor = "multi_thread")] -async fn fed1_schema_with_both_qp() { - let mut router = IntegrationTest::builder() - .config("experimental_query_planner_mode: both") - .supergraph("../examples/graphql/local.graphql") - .build() - .await; - router.start().await; - router - .assert_log_contains( - "could not create router: \ - The supergraph schema failed to produce a valid API schema: \ - Supergraphs composed with federation version 1 are not supported.", - ) - .await; - router.assert_shutdown().await; -} - -#[tokio::test(flavor = "multi_thread")] -async fn fed1_schema_with_both_best_effort_qp() { - let mut router = IntegrationTest::builder() - .config("experimental_query_planner_mode: both_best_effort") - .supergraph("../examples/graphql/local.graphql") - .build() - .await; - router.start().await; - router - .assert_log_contains( - "Failed to initialize the new query planner, falling back to legacy: \ - The supergraph schema failed to produce a valid API schema: \ - Supergraphs composed with federation version 1 are not supported. 
\ - Please recompose your supergraph with federation version 2 or greater", - ) - .await; - router.assert_started().await; - router.execute_default_query().await; - router.graceful_shutdown().await; -} diff --git a/apollo-router/tests/integration/mod.rs b/apollo-router/tests/integration/mod.rs index 7ab2f50d95..f4c840d9e4 100644 --- a/apollo-router/tests/integration/mod.rs +++ b/apollo-router/tests/integration/mod.rs @@ -8,6 +8,7 @@ mod docs; mod file_upload; mod lifecycle; mod operation_limits; +mod query_planner; mod subgraph_response; mod traffic_shaping; diff --git a/apollo-router/tests/integration/query_planner.rs b/apollo-router/tests/integration/query_planner.rs new file mode 100644 index 0000000000..9c85c99690 --- /dev/null +++ b/apollo-router/tests/integration/query_planner.rs @@ -0,0 +1,466 @@ +use std::path::PathBuf; + +use crate::integration::common::graph_os_enabled; +use crate::integration::IntegrationTest; + +const PROMETHEUS_METRICS_CONFIG: &str = include_str!("telemetry/fixtures/prometheus.router.yaml"); +const LEGACY_QP: &str = "experimental_query_planner_mode: legacy"; +const NEW_QP: &str = "experimental_query_planner_mode: new"; +const BOTH_QP: &str = "experimental_query_planner_mode: both"; +const BOTH_BEST_EFFORT_QP: &str = "experimental_query_planner_mode: both_best_effort"; + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_legacy_qp() { + let mut router = IntegrationTest::builder() + .config(LEGACY_QP) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_new_qp() { + let mut router = IntegrationTest::builder() + .config(NEW_QP) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + failed to 
initialize the query planner: \ + Supergraphs composed with federation version 1 are not supported.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_both_qp() { + let mut router = IntegrationTest::builder() + .config(BOTH_QP) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + failed to initialize the query planner: \ + Supergraphs composed with federation version 1 are not supported.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_both_best_effort_qp() { + let mut router = IntegrationTest::builder() + .config(BOTH_BEST_EFFORT_QP) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "Falling back to the legacy query planner: \ + failed to initialize the query planner: \ + Supergraphs composed with federation version 1 are not supported. 
\ + Please recompose your supergraph with federation version 2 or greater", + ) + .await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_legacy_qp_reload_to_new_keep_previous_config() { + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + router.update_config(&config).await; + router + .assert_log_contains("error while reloading, continuing with previous configuration") + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="fed1",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_legacy_qp_reload_to_both_best_effort_keep_previous_config() { + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{BOTH_BEST_EFFORT_QP}"); + router.update_config(&config).await; + router + .assert_log_contains( + "Falling back to the legacy query planner: \ + failed to initialize the query planner: \ + Supergraphs composed with federation version 1 are not supported. 
\ + Please recompose your supergraph with federation version 2 or greater", + ) + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="fed1",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed2_schema_with_new_qp() { + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("../examples/graphql/supergraph-fed2.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_is_success="true",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn progressive_override_with_legacy_qp() { + if !graph_os_enabled() { + return; + } + let mut router = IntegrationTest::builder() + .config(LEGACY_QP) + .supergraph("src/plugins/progressive_override/testdata/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn progressive_override_with_new_qp() { + if !graph_os_enabled() { + return; + } + let mut router = IntegrationTest::builder() + .config(NEW_QP) + .supergraph("src/plugins/progressive_override/testdata/supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + failed to initialize the query planner: \ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with progressive overrides. 
\ + Remove uses of progressive overrides to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn progressive_override_with_legacy_qp_change_to_new_qp_keeps_old_config() { + if !graph_os_enabled() { + return; + } + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("src/plugins/progressive_override/testdata/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + router.update_config(&config).await; + router + .assert_log_contains("error while reloading, continuing with previous configuration") + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="overrides",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn progressive_override_with_legacy_qp_reload_to_both_best_effort_keep_previous_config() { + if !graph_os_enabled() { + return; + } + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("src/plugins/progressive_override/testdata/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{BOTH_BEST_EFFORT_QP}"); + router.update_config(&config).await; + router + .assert_log_contains( + "Falling back to the legacy query planner: \ + failed to initialize the query planner: \ + `experimental_query_planner_mode: new` or `both` cannot yet \ 
+ be used with progressive overrides. \ + Remove uses of progressive overrides to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.", + ) + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="overrides",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn context_with_legacy_qp() { + if !graph_os_enabled() { + return; + } + let mut router = IntegrationTest::builder() + .config(PROMETHEUS_METRICS_CONFIG) + .supergraph("tests/fixtures/set_context/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn context_with_new_qp() { + if !graph_os_enabled() { + return; + } + let mut router = IntegrationTest::builder() + .config(NEW_QP) + .supergraph("tests/fixtures/set_context/supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + failed to initialize the query planner: \ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with `@context`. 
\ + Remove uses of `@context` to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn context_with_legacy_qp_change_to_new_qp_keeps_old_config() { + if !graph_os_enabled() { + return; + } + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("tests/fixtures/set_context/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + router.update_config(&config).await; + router + .assert_log_contains("error while reloading, continuing with previous configuration") + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="context",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn context_with_legacy_qp_reload_to_both_best_effort_keep_previous_config() { + if !graph_os_enabled() { + return; + } + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("tests/fixtures/set_context/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{BOTH_BEST_EFFORT_QP}"); + router.update_config(&config).await; + router + .assert_log_contains( + "Falling back to the legacy query planner: \ + failed to initialize the query planner: \ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with `@context`. 
\ + Remove uses of `@context` to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.", + ) + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="context",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn invalid_schema_with_legacy_qp_fails_startup() { + let mut router = IntegrationTest::builder() + .config(LEGACY_QP) + .supergraph("tests/fixtures/broken-supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + Federation error: Invalid supergraph: must be a core schema", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn invalid_schema_with_new_qp_fails_startup() { + let mut router = IntegrationTest::builder() + .config(NEW_QP) + .supergraph("tests/fixtures/broken-supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + Federation error: Invalid supergraph: must be a core schema", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn invalid_schema_with_both_qp_fails_startup() { + let mut router = IntegrationTest::builder() + .config(BOTH_QP) + .supergraph("tests/fixtures/broken-supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + Federation error: Invalid supergraph: must be a core schema", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn invalid_schema_with_both_best_effort_qp_fails_startup() { + let mut router = IntegrationTest::builder() + .config(BOTH_BEST_EFFORT_QP) + 
.supergraph("tests/fixtures/broken-supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + Federation error: Invalid supergraph: must be a core schema", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn valid_schema_with_new_qp_change_to_broken_schema_keeps_old_config() { + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("tests/fixtures/valid-supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_is_success="true",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router + .update_schema(&PathBuf::from("tests/fixtures/broken-supergraph.graphql")) + .await; + router + .assert_log_contains("error while reloading, continuing with previous configuration") + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} From 1ea3d787f83fd001dcbb277747df66f86e45458c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Tue, 20 Aug 2024 12:58:59 +0200 Subject: [PATCH 081/108] fix(federation): validate query against API schema in CLI (#5852) --- apollo-federation/cli/src/main.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/apollo-federation/cli/src/main.rs b/apollo-federation/cli/src/main.rs index 7e707434e2..ab42f16151 100644 --- a/apollo-federation/cli/src/main.rs +++ b/apollo-federation/cli/src/main.rs @@ -248,11 +248,12 @@ fn cmd_plan( ) -> Result<(), FederationError> { let query = read_input(query_path); let supergraph = load_supergraph(schema_paths)?; - let query_doc = - ExecutableDocument::parse_and_validate(supergraph.schema.schema(), query, query_path)?; - let config = 
QueryPlannerConfig::from(planner); + let config = QueryPlannerConfig::from(planner); let planner = QueryPlanner::new(&supergraph, config)?; + + let query_doc = + ExecutableDocument::parse_and_validate(planner.api_schema().schema(), query, query_path)?; print!("{}", planner.build_query_plan(&query_doc, None)?); Ok(()) } From c461392216fd7fa18846bcc8c4895ced31b3e86c Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 20 Aug 2024 15:25:22 +0200 Subject: [PATCH 082/108] set the subgraph error path if not present (#5773) This fixes subgraph response conversion to set the error path in all cases. For some network level errors, the subgraph service was not setting the path --- .changesets/fix_geal_subgraph_error_path.md | 5 + ...factory__tests__defer_is_not_buffered.snap | 4 + .../src/plugins/include_subgraph_errors.rs | 7 +- apollo-router/src/query_planner/fetch.rs | 13 +- ...__supergraph__tests__missing_entities.snap | 8 +- apollo-router/tests/integration/batching.rs | 159 ++++++++++-------- ...raffic_shaping__subgraph_rate_limit-2.snap | 2 +- ...on__traffic_shaping__subgraph_timeout.snap | 4 +- .../tests/integration/subgraph_response.rs | 1 + 9 files changed, 117 insertions(+), 86 deletions(-) create mode 100644 .changesets/fix_geal_subgraph_error_path.md diff --git a/.changesets/fix_geal_subgraph_error_path.md b/.changesets/fix_geal_subgraph_error_path.md new file mode 100644 index 0000000000..21c32032eb --- /dev/null +++ b/.changesets/fix_geal_subgraph_error_path.md @@ -0,0 +1,5 @@ +### set the subgraph error path if not present ([PR #5773](https://github.com/apollographql/router/pull/5773)) + +This fixes subgraph response conversion to set the error path in all cases. 
For some network level errors, the subgraph service was not setting the path + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5773 \ No newline at end of file diff --git a/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap b/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap index e60d87a783..6d6e785101 100644 --- a/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap +++ b/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap @@ -21,6 +21,10 @@ expression: parts "errors": [ { "message": "couldn't find mock for query {\"query\":\"query TopProducts__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{__typename id product{__typename upc}}}}}\",\"operationName\":\"TopProducts__reviews__1\",\"variables\":{\"representations\":[{\"__typename\":\"Product\",\"upc\":\"1\"},{\"__typename\":\"Product\",\"upc\":\"2\"}]}}", + "path": [ + "topProducts", + "@" + ], "extensions": { "code": "FETCH_ERROR" } diff --git a/apollo-router/src/plugins/include_subgraph_errors.rs b/apollo-router/src/plugins/include_subgraph_errors.rs index 56aae8045c..4f558ced82 100644 --- a/apollo-router/src/plugins/include_subgraph_errors.rs +++ b/apollo-router/src/plugins/include_subgraph_errors.rs @@ -104,19 +104,20 @@ mod test { use crate::Configuration; static UNREDACTED_PRODUCT_RESPONSE: Lazy = Lazy::new(|| { - Bytes::from_static(r#"{"data":{"topProducts":null},"errors":[{"message":"couldn't find mock for query {\"query\":\"query ErrorTopProducts__products__0($first:Int){topProducts(first:$first){__typename upc name}}\",\"operationName\":\"ErrorTopProducts__products__0\",\"variables\":{\"first\":2}}","extensions":{"test":"value","code":"FETCH_ERROR"}}]}"#.as_bytes()) + 
Bytes::from_static(r#"{"data":{"topProducts":null},"errors":[{"message":"couldn't find mock for query {\"query\":\"query ErrorTopProducts__products__0($first:Int){topProducts(first:$first){__typename upc name}}\",\"operationName\":\"ErrorTopProducts__products__0\",\"variables\":{\"first\":2}}","path":[],"extensions":{"test":"value","code":"FETCH_ERROR"}}]}"#.as_bytes()) }); static REDACTED_PRODUCT_RESPONSE: Lazy = Lazy::new(|| { Bytes::from_static( - r#"{"data":{"topProducts":null},"errors":[{"message":"Subgraph errors redacted"}]}"# + r#"{"data":{"topProducts":null},"errors":[{"message":"Subgraph errors redacted","path":[]}]}"# .as_bytes(), ) }); static REDACTED_ACCOUNT_RESPONSE: Lazy = Lazy::new(|| { Bytes::from_static( - r#"{"data":null,"errors":[{"message":"Subgraph errors redacted"}]}"#.as_bytes(), + r#"{"data":null,"errors":[{"message":"Subgraph errors redacted","path":[]}]}"# + .as_bytes(), ) }); diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 05b871e60e..47069283ca 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -582,6 +582,7 @@ impl FetchNode { errors.push(error); } } else { + error.path = Some(current_dir.clone()); errors.push(error); } } @@ -639,13 +640,17 @@ impl FetchNode { .errors .into_iter() .map(|error| { - let path = error.path.as_ref().map(|path| { - Path::from_iter(current_slice.iter().chain(path.iter()).cloned()) - }); + let path = error + .path + .as_ref() + .map(|path| { + Path::from_iter(current_slice.iter().chain(path.iter()).cloned()) + }) + .unwrap_or_else(|| current_dir.clone()); Error { locations: error.locations, - path, + path: Some(path), message: error.message, extensions: error.extensions, } diff --git a/apollo-router/src/services/supergraph/snapshots/apollo_router__services__supergraph__tests__missing_entities.snap 
b/apollo-router/src/services/supergraph/snapshots/apollo_router__services__supergraph__tests__missing_entities.snap index 33f7508979..a4366f1d9a 100644 --- a/apollo-router/src/services/supergraph/snapshots/apollo_router__services__supergraph__tests__missing_entities.snap +++ b/apollo-router/src/services/supergraph/snapshots/apollo_router__services__supergraph__tests__missing_entities.snap @@ -1,5 +1,5 @@ --- -source: apollo-router/src/services/supergraph_service.rs +source: apollo-router/src/services/supergraph/tests.rs expression: stream.next_response().await.unwrap() --- { @@ -14,7 +14,11 @@ expression: stream.next_response().await.unwrap() }, "errors": [ { - "message": "error" + "message": "error", + "path": [ + "currentUser", + "activeOrganization" + ] } ] } diff --git a/apollo-router/tests/integration/batching.rs b/apollo-router/tests/integration/batching.rs index c50c85054b..071ca5cb7a 100644 --- a/apollo-router/tests/integration/batching.rs +++ b/apollo-router/tests/integration/batching.rs @@ -140,19 +140,20 @@ async fn it_batches_with_errors_in_single_graph() -> Result<(), BoxError> { if test_is_enabled() { // Make sure that we got back what we wanted assert_yaml_snapshot!(responses, @r###" - --- - - data: - entryA: - index: 0 - - errors: - - message: expected error in A - - data: - entryA: - index: 2 - - data: - entryA: - index: 3 - "###); + --- + - data: + entryA: + index: 0 + - errors: + - message: expected error in A + path: [] + - data: + entryA: + index: 2 + - data: + entryA: + index: 3 + "###); } Ok(()) @@ -189,24 +190,26 @@ async fn it_batches_with_errors_in_multi_graph() -> Result<(), BoxError> { if test_is_enabled() { assert_yaml_snapshot!(responses, @r###" - --- - - data: - entryA: - index: 0 - - data: - entryB: - index: 0 - - errors: - - message: expected error in A - - errors: - - message: expected error in B - - data: - entryA: - index: 2 - - data: - entryB: - index: 2 - "###); + --- + - data: + entryA: + index: 0 + - data: + entryB: + index: 
0 + - errors: + - message: expected error in A + path: [] + - errors: + - message: expected error in B + path: [] + - data: + entryA: + index: 2 + - data: + entryB: + index: 2 + "###); } Ok(()) @@ -250,6 +253,7 @@ async fn it_handles_short_timeouts() -> Result<(), BoxError> { index: 0 - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT - data: @@ -257,6 +261,7 @@ async fn it_handles_short_timeouts() -> Result<(), BoxError> { index: 1 - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT "###); @@ -323,14 +328,17 @@ async fn it_handles_indefinite_timeouts() -> Result<(), BoxError> { index: 2 - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT "###); @@ -554,22 +562,24 @@ async fn it_handles_cancelled_by_coprocessor() -> Result<(), BoxError> { if test_is_enabled() { assert_yaml_snapshot!(responses, @r###" - --- - - errors: - - message: Subgraph A is not allowed - extensions: - code: ERR_NOT_ALLOWED - - data: - entryB: - index: 0 - - errors: - - message: Subgraph A is not allowed - extensions: - code: ERR_NOT_ALLOWED - - data: - entryB: - index: 1 - "###); + --- + - errors: + - message: Subgraph A is not allowed + path: [] + extensions: + code: ERR_NOT_ALLOWED + - data: + entryB: + index: 0 + - errors: + - message: Subgraph A is not allowed + path: [] + extensions: + code: ERR_NOT_ALLOWED + - data: + entryB: + index: 1 + "###); } Ok(()) @@ -697,33 +707,34 @@ async fn it_handles_single_request_cancelled_by_coprocessor() -> Result<(), BoxE if test_is_enabled() { assert_yaml_snapshot!(responses, @r###" - --- - - data: - entryA: - index: 0 - - data: - entryB: - index: 0 - - data: - entryA: - index: 1 - - data: - entryB: - index: 1 - - errors: - - message: Subgraph A index 2 is not allowed - extensions: - code: 
ERR_NOT_ALLOWED - - data: - entryB: - index: 2 - - data: - entryA: - index: 3 - - data: - entryB: - index: 3 - "###); + --- + - data: + entryA: + index: 0 + - data: + entryB: + index: 0 + - data: + entryA: + index: 1 + - data: + entryB: + index: 1 + - errors: + - message: Subgraph A index 2 is not allowed + path: [] + extensions: + code: ERR_NOT_ALLOWED + - data: + entryB: + index: 2 + - data: + entryA: + index: 3 + - data: + entryB: + index: 3 + "###); } Ok(()) diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_rate_limit-2.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_rate_limit-2.snap index 584b125252..07df294289 100644 --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_rate_limit-2.snap +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_rate_limit-2.snap @@ -2,4 +2,4 @@ source: apollo-router/tests/integration/traffic_shaping.rs expression: response --- -"{\"data\":null,\"errors\":[{\"message\":\"Your request has been rate limited\",\"extensions\":{\"code\":\"REQUEST_RATE_LIMITED\"}}]}" +"{\"data\":null,\"errors\":[{\"message\":\"Your request has been rate limited\",\"path\":[],\"extensions\":{\"code\":\"REQUEST_RATE_LIMITED\"}}]}" diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_timeout.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_timeout.snap index 671e207784..407674dfff 100644 --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_timeout.snap +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_timeout.snap @@ -1,5 +1,5 @@ --- source: apollo-router/tests/integration/traffic_shaping.rs -expression: 
response.text().await? +expression: response --- -"{\"data\":null,\"errors\":[{\"message\":\"Request timed out\",\"extensions\":{\"code\":\"REQUEST_TIMEOUT\"}}]}" +"{\"data\":null,\"errors\":[{\"message\":\"Request timed out\",\"path\":[],\"extensions\":{\"code\":\"REQUEST_TIMEOUT\"}}]}" diff --git a/apollo-router/tests/integration/subgraph_response.rs b/apollo-router/tests/integration/subgraph_response.rs index 5e6e831d3c..2dd8fc68d6 100644 --- a/apollo-router/tests/integration/subgraph_response.rs +++ b/apollo-router/tests/integration/subgraph_response.rs @@ -118,6 +118,7 @@ async fn test_invalid_error_locations() -> Result<(), BoxError> { "data": null, "errors": [{ "message":"service 'products' response was malformed: invalid `locations` within error: invalid type: boolean `true`, expected u32", + "path": [], "extensions": { "service": "products", "reason": "invalid `locations` within error: invalid type: boolean `true`, expected u32", From 8b3822f4068074bdbce0fd5697eb807f504d19ec Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 20 Aug 2024 17:02:23 +0200 Subject: [PATCH 083/108] reactivate the entity cache key invalidation test (#5818) --- .../{skipped.json => plan.json} | 10 +++++++++- apollo-router/tests/samples_tests.rs | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) rename apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/{skipped.json => plan.json} (96%) diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json similarity index 96% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json index c08682fff8..1bb1bc0210 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json +++ 
b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json @@ -172,15 +172,23 @@ }, "expected_response":{ "data":{ - "topProducts":[{"reviews":null},{"reviews":null}] + "topProducts":[ + {"reviews": [{ + "body": "A" + },{ + "body": "B" + }]}, + {"reviews":null}] }, "errors":[ { "message":"HTTP fetch failed from 'invalidation-entity-key-reviews': 500: Internal Server Error", + "path": ["topProducts", 1], "extensions":{"code":"SUBREQUEST_HTTP_ERROR","service":"invalidation-entity-key-reviews","reason":"500: Internal Server Error","http":{"status":500}} }, { "message":"service 'invalidation-entity-key-reviews' response was malformed: {}", + "path": ["topProducts", 1], "extensions":{"service":"invalidation-entity-key-reviews","reason":"{}","code":"SUBREQUEST_MALFORMED_RESPONSE"} } ] diff --git a/apollo-router/tests/samples_tests.rs b/apollo-router/tests/samples_tests.rs index fe7d87ed71..e3fd0d5264 100644 --- a/apollo-router/tests/samples_tests.rs +++ b/apollo-router/tests/samples_tests.rs @@ -472,7 +472,7 @@ impl TestExecution { writeln!(out, "assertion `left == right` failed").unwrap(); writeln!( out, - " expected: {}", + "expected: {}", serde_json::to_string(&expected_response).unwrap() ) .unwrap(); From 7e29b5df7863d3628ac6200e5021958d02269746 Mon Sep 17 00:00:00 2001 From: Taylor Ninesling Date: Tue, 20 Aug 2024 08:32:34 -0700 Subject: [PATCH 084/108] Account for demand control directives when scoring operations (#5777) --- .../feat_tninesling_cost_directives.md | 28 ++ .../cost_calculator/directives.rs | 244 ++++++++++- .../fixtures/basic_input_object_query.graphql | 4 +- .../basic_input_object_query_2.graphql | 8 + .../fixtures/basic_input_object_response.json | 9 + .../fixtures/basic_schema.graphql | 3 + .../fixtures/custom_cost_query.graphql | 20 + ...uery_with_default_slicing_argument.graphql | 20 + .../fixtures/custom_cost_response.json | 24 + .../fixtures/custom_cost_schema.graphql | 154 +++++++ 
...ost_schema_with_renamed_directives.graphql | 163 +++++++ .../demand_control/cost_calculator/mod.rs | 1 + .../demand_control/cost_calculator/schema.rs | 180 ++++++++ .../cost_calculator/static_cost.rs | 412 +++++++++++++----- .../src/plugins/demand_control/mod.rs | 34 +- .../plugins/demand_control/strategy/mod.rs | 14 +- 16 files changed, 1183 insertions(+), 135 deletions(-) create mode 100644 .changesets/feat_tninesling_cost_directives.md create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query_2.graphql create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_response.json create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query.graphql create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query_with_default_slicing_argument.graphql create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_response.json create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema_with_renamed_directives.graphql create mode 100644 apollo-router/src/plugins/demand_control/cost_calculator/schema.rs diff --git a/.changesets/feat_tninesling_cost_directives.md b/.changesets/feat_tninesling_cost_directives.md new file mode 100644 index 0000000000..e7994edc84 --- /dev/null +++ b/.changesets/feat_tninesling_cost_directives.md @@ -0,0 +1,28 @@ +### Account for demand control directives when scoring operations ([PR #5777](https://github.com/apollographql/router/pull/5777)) + +When scoring operations in the demand control plugin, utilize applications of `@cost` and `@listSize` from the supergraph schema to make better cost estimates. 
+ +For expensive resolvers, the `@cost` directive can override the default weights in the cost calculation. + +```graphql +type Product { + id: ID! + name: String + expensiveField: Int @cost(weight: 20) +} +``` + +Additionally, if a list field's length differs significantly from the globally-configured list size, the `@listSize` directive can provide a tighter size estimate. + +```graphql +type Magazine { + # This is assumed to always return 5 items + headlines: [Article] @listSize(assumedSize: 5) + + # This is estimated to return as many items as are requested by the parameter named "first" + getPage(first: Int!, after: ID!): [Article] + @listSize(slicingArguments: ["first"]) +} +``` + +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5777 diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs b/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs index b3f3afe372..c4dcc36b00 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs @@ -1,13 +1,112 @@ +use ahash::HashMap; +use ahash::HashMapExt; +use ahash::HashSet; +use apollo_compiler::ast::DirectiveList; +use apollo_compiler::ast::FieldDefinition; +use apollo_compiler::ast::InputValueDefinition; use apollo_compiler::ast::NamedType; use apollo_compiler::executable::Field; use apollo_compiler::executable::SelectionSet; +use apollo_compiler::name; use apollo_compiler::parser::Parser; +use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; +use apollo_compiler::Name; use apollo_compiler::Schema; +use apollo_federation::link::spec::APOLLO_SPEC_DOMAIN; +use apollo_federation::link::Link; use tower::BoxError; use super::DemandControlError; +const COST_DIRECTIVE_NAME: Name = name!("cost"); +const COST_DIRECTIVE_DEFAULT_NAME: Name = name!("federation__cost"); +const 
COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME: Name = name!("weight"); + +const LIST_SIZE_DIRECTIVE_NAME: Name = name!("listSize"); +const LIST_SIZE_DIRECTIVE_DEFAULT_NAME: Name = name!("federation__listSize"); +const LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME: Name = name!("assumedSize"); +const LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME: Name = name!("slicingArguments"); +const LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME: Name = name!("sizedFields"); +const LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME: Name = + name!("requireOneSlicingArgument"); + +pub(in crate::plugins::demand_control) fn get_apollo_directive_names( + schema: &Schema, +) -> HashMap { + let mut hm: HashMap = HashMap::new(); + for directive in &schema.schema_definition.directives { + if directive.name.as_str() == "link" { + if let Ok(link) = Link::from_directive_application(directive) { + if link.url.identity.domain != APOLLO_SPEC_DOMAIN { + continue; + } + for import in link.imports { + hm.insert(import.element.clone(), import.imported_name().clone()); + } + } + } + } + hm +} + +pub(in crate::plugins::demand_control) struct CostDirective { + weight: i32, +} + +impl CostDirective { + pub(in crate::plugins::demand_control) fn weight(&self) -> f64 { + self.weight as f64 + } + + pub(in crate::plugins::demand_control) fn from_argument( + directive_name_map: &HashMap, + argument: &InputValueDefinition, + ) -> Option { + Self::from_directives(directive_name_map, &argument.directives) + } + + pub(in crate::plugins::demand_control) fn from_field( + directive_name_map: &HashMap, + field: &FieldDefinition, + ) -> Option { + Self::from_directives(directive_name_map, &field.directives) + } + + pub(in crate::plugins::demand_control) fn from_type( + directive_name_map: &HashMap, + ty: &ExtendedType, + ) -> Option { + Self::from_schema_directives(directive_name_map, ty.directives()) + } + + fn from_directives( + directive_name_map: &HashMap, + directives: &DirectiveList, + ) -> Option { 
+ directive_name_map + .get(&COST_DIRECTIVE_NAME) + .and_then(|name| directives.get(name)) + .or(directives.get(&COST_DIRECTIVE_DEFAULT_NAME)) + .and_then(|cost| cost.argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME)) + .and_then(|weight| weight.to_i32()) + .map(|weight| Self { weight }) + } + + pub(in crate::plugins::demand_control) fn from_schema_directives( + directive_name_map: &HashMap, + directives: &apollo_compiler::schema::DirectiveList, + ) -> Option { + directive_name_map + .get(&COST_DIRECTIVE_NAME) + .and_then(|name| directives.get(name)) + .or(directives.get(&COST_DIRECTIVE_DEFAULT_NAME)) + .and_then(|cost| cost.argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME)) + .and_then(|weight| weight.to_i32()) + .map(|weight| Self { weight }) + } +} + pub(in crate::plugins::demand_control) struct IncludeDirective { pub(in crate::plugins::demand_control) is_included: bool, } @@ -27,31 +126,142 @@ impl IncludeDirective { } } +pub(in crate::plugins::demand_control) struct ListSizeDirective<'schema> { + pub(in crate::plugins::demand_control) expected_size: Option, + pub(in crate::plugins::demand_control) sized_fields: Option>, +} + +impl<'schema> ListSizeDirective<'schema> { + pub(in crate::plugins::demand_control) fn size_of(&self, field: &Field) -> Option { + if self + .sized_fields + .as_ref() + .is_some_and(|sf| sf.contains(field.name.as_str())) + { + self.expected_size + } else { + None + } + } +} + +/// The `@listSize` directive from a field definition, which can be converted to +/// `ListSizeDirective` with a concrete field from a request. 
+pub(in crate::plugins::demand_control) struct DefinitionListSizeDirective { + assumed_size: Option, + slicing_argument_names: Option>, + sized_fields: Option>, + require_one_slicing_argument: bool, +} + +impl DefinitionListSizeDirective { + pub(in crate::plugins::demand_control) fn from_field_definition( + directive_name_map: &HashMap, + definition: &FieldDefinition, + ) -> Result, DemandControlError> { + let directive = directive_name_map + .get(&LIST_SIZE_DIRECTIVE_NAME) + .and_then(|name| definition.directives.get(name)) + .or(definition.directives.get(&LIST_SIZE_DIRECTIVE_DEFAULT_NAME)); + if let Some(directive) = directive { + let assumed_size = directive + .argument_by_name(&LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME) + .and_then(|arg| arg.to_i32()); + let slicing_argument_names = directive + .argument_by_name(&LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME) + .and_then(|arg| arg.as_list()) + .map(|arg_list| { + arg_list + .iter() + .flat_map(|arg| arg.as_str()) + .map(String::from) + .collect() + }); + let sized_fields = directive + .argument_by_name(&LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME) + .and_then(|arg| arg.as_list()) + .map(|arg_list| { + arg_list + .iter() + .flat_map(|arg| arg.as_str()) + .map(String::from) + .collect() + }); + let require_one_slicing_argument = directive + .argument_by_name(&LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME) + .and_then(|arg| arg.to_bool()) + .unwrap_or(true); + + Ok(Some(Self { + assumed_size, + slicing_argument_names, + sized_fields, + require_one_slicing_argument, + })) + } else { + Ok(None) + } + } + + pub(in crate::plugins::demand_control) fn with_field( + &self, + field: &Field, + ) -> Result { + let mut slicing_arguments: HashMap<&str, i32> = HashMap::new(); + if let Some(slicing_argument_names) = self.slicing_argument_names.as_ref() { + // First, collect the default values for each argument + for argument in &field.definition.arguments { + if 
slicing_argument_names.contains(argument.name.as_str()) { + if let Some(numeric_value) = + argument.default_value.as_ref().and_then(|v| v.to_i32()) + { + slicing_arguments.insert(&argument.name, numeric_value); + } + } + } + // Then, overwrite any default values with the actual values passed in the query + for argument in &field.arguments { + if slicing_argument_names.contains(argument.name.as_str()) { + if let Some(numeric_value) = argument.value.to_i32() { + slicing_arguments.insert(&argument.name, numeric_value); + } + } + } + + if self.require_one_slicing_argument && slicing_arguments.len() != 1 { + return Err(DemandControlError::QueryParseFailure(format!( + "Exactly one slicing argument is required, but found {}", + slicing_arguments.len() + ))); + } + } + + let expected_size = slicing_arguments + .values() + .max() + .cloned() + .or(self.assumed_size); + + Ok(ListSizeDirective { + expected_size, + sized_fields: self + .sized_fields + .as_ref() + .map(|set| set.iter().map(|s| s.as_str()).collect()), + }) + } +} + pub(in crate::plugins::demand_control) struct RequiresDirective { pub(in crate::plugins::demand_control) fields: SelectionSet, } impl RequiresDirective { - pub(in crate::plugins::demand_control) fn from_field( - field: &Field, + pub(in crate::plugins::demand_control) fn from_field_definition( + definition: &FieldDefinition, parent_type_name: &NamedType, schema: &Valid, ) -> Result, DemandControlError> { - // When a user marks a subgraph schema field with `@requires`, the composition process - // replaces `@requires(field: "")` with `@join__field(requires: "")`. - // - // Note we cannot use `field.definition` in this case: The operation executes against the - // API schema, so its definition pointers point into the API schema. To find the - // `@join__field()` directive, we must instead look up the field on the type with the same - // name in the supergraph. 
- let definition = schema - .type_field(parent_type_name, &field.name) - .map_err(|_err| { - DemandControlError::QueryParseFailure(format!( - "Could not find the API schema type {}.{} in the supergraph. This looks like a bug", - parent_type_name, &field.name - )) - })?; let requires_arg = definition .directives .get("join__field") diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql index 86a01356e7..c8494f9697 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql @@ -1,3 +1,5 @@ query BasicInputObjectQuery { - getScalarByObject(args: { inner: { id: 1 } }) + getScalarByObject( + args: { inner: { id: 1 }, listOfInner: [{ id: 2 }, { id: 3 }] } + ) } diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query_2.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query_2.graphql new file mode 100644 index 0000000000..26a1a06623 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query_2.graphql @@ -0,0 +1,8 @@ +query BasicInputObjectQuery2 { + getObjectsByObject( + args: { inner: { id: 1 }, listOfInner: [{ id: 2 }, { id: 3 }] } + ) { + field1 + field2 + } +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_response.json b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_response.json new file mode 100644 index 0000000000..092377bf7f --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_response.json @@ -0,0 +1,9 @@ +{ + "data": { + "getObjectsByObject": [ + { "field1": 1, "field2": 
"one" }, + { "field1": 2, "field2": "two" }, + { "field1": 3, "field2": "three" } + ] + } +} \ No newline at end of file diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql index d613012b0d..17f3046414 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql @@ -7,6 +7,7 @@ type Query { someUnion: UnionOfObjectTypes someObjects: [FirstObjectType] intList: [Int] + getObjectsByObject(args: OuterInput): [SecondObjectType] } type Mutation { @@ -35,4 +36,6 @@ input InnerInput { input OuterInput { inner: InnerInput + inner2: InnerInput + listOfInner: [InnerInput!] } diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query.graphql new file mode 100644 index 0000000000..751c8a005e --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query.graphql @@ -0,0 +1,20 @@ +fragment Items on SizedField { + items { + id + } +} + +{ + fieldWithCost + argWithCost(arg: 3) + enumWithCost + inputWithCost(someInput: { somethingWithCost: 10 }) + scalarWithCost + objectWithCost { + id + } + fieldWithListSize + fieldWithDynamicListSize(first: 5) { + ...Items + } +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query_with_default_slicing_argument.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query_with_default_slicing_argument.graphql new file mode 100644 index 0000000000..fb50e08fef --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query_with_default_slicing_argument.graphql @@ -0,0 +1,20 @@ +fragment Items on 
SizedField { + items { + id + } +} + +{ + fieldWithCost + argWithCost(arg: 3) + enumWithCost + inputWithCost(someInput: { somethingWithCost: 10 }) + scalarWithCost + objectWithCost { + id + } + fieldWithListSize + fieldWithDynamicListSize { + ...Items + } +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_response.json b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_response.json new file mode 100644 index 0000000000..664a2684e6 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_response.json @@ -0,0 +1,24 @@ +{ + "data": { + "fieldWithCost": 1, + "argWithCost": 2, + "enumWithCost": "A", + "inputWithCost": 3, + "scalarWithCost": 4, + "objectWithCost": { + "id": 5 + }, + "fieldWithListSize": [ + "first", + "second", + "third" + ], + "fieldWithDynamicListSize": { + "items": [ + { "id": 6 }, + { "id": 7 }, + { "id": 8 } + ] + } + } +} \ No newline at end of file diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql new file mode 100644 index 0000000000..d966512be1 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql @@ -0,0 +1,154 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link( + url: "https://specs.apollo.dev/cost/v0.1" + import: ["@cost", "@listSize"] + ) { + query: Query +} + +directive @cost( + weight: Int! +) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @cost__listSize( + assumedSize: Int + slicingArguments: [String!] + sizedFields: [String!] + requireOneSlicingArgument: Boolean = true +) on FIELD_DEFINITION + +directive @join__directive( + graphs: [join__Graph!] + name: String! 
+ args: join__DirectiveArguments +) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + overrideLabel: String + contextArguments: [join__ContextArgument!] +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__type( + graph: join__Graph! + key: join__FieldSet + extension: Boolean! = false + resolvable: Boolean! = true + isInterfaceObject: Boolean! = false +) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @listSize( + assumedSize: Int + slicingArguments: [String!] + sizedFields: [String!] + requireOneSlicingArgument: Boolean = true +) on FIELD_DEFINITION + +type A @join__type(graph: SUBGRAPHWITHLISTSIZE) { + id: ID +} + +enum AorB @join__type(graph: SUBGRAPHWITHCOST) @cost(weight: 15) { + A @join__enumValue(graph: SUBGRAPHWITHCOST) + B @join__enumValue(graph: SUBGRAPHWITHCOST) +} + +scalar ExpensiveInt @join__type(graph: SUBGRAPHWITHCOST) @cost(weight: 30) + +type ExpensiveObject @join__type(graph: SUBGRAPHWITHCOST) @cost(weight: 40) { + id: ID +} + +input InputTypeWithCost @join__type(graph: SUBGRAPHWITHCOST) { + somethingWithCost: Int @cost(weight: 20) +} + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! 
+} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPHWITHCOST + @join__graph(name: "subgraphWithCost", url: "http://localhost:4001") + SUBGRAPHWITHLISTSIZE + @join__graph(name: "subgraphWithListSize", url: "http://localhost:4002") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPHWITHCOST) + @join__type(graph: SUBGRAPHWITHLISTSIZE) { + fieldWithCost: Int @join__field(graph: SUBGRAPHWITHCOST) @cost(weight: 5) + argWithCost(arg: Int @cost(weight: 10)): Int + @join__field(graph: SUBGRAPHWITHCOST) + enumWithCost: AorB @join__field(graph: SUBGRAPHWITHCOST) + inputWithCost(someInput: InputTypeWithCost): Int + @join__field(graph: SUBGRAPHWITHCOST) + scalarWithCost: ExpensiveInt @join__field(graph: SUBGRAPHWITHCOST) + objectWithCost: ExpensiveObject @join__field(graph: SUBGRAPHWITHCOST) + fieldWithListSize: [String!] 
+ @join__field(graph: SUBGRAPHWITHLISTSIZE) + @listSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int = 10): SizedField + @join__field(graph: SUBGRAPHWITHLISTSIZE) + @listSize( + slicingArguments: ["first"] + sizedFields: ["items"] + requireOneSlicingArgument: true + ) +} + +type SizedField @join__type(graph: SUBGRAPHWITHLISTSIZE) { + items: [A] +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema_with_renamed_directives.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema_with_renamed_directives.graphql new file mode 100644 index 0000000000..1d1f17263d --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema_with_renamed_directives.graphql @@ -0,0 +1,163 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link( + url: "https://specs.apollo.dev/cost/v0.1" + import: [ + { name: "@cost", as: "@renamedCost" } + { name: "@listSize", as: "@renamedListSize" } + ] + ) { + query: Query +} + +directive @cost__listSize( + assumedSize: Int + slicingArguments: [String!] + sizedFields: [String!] + requireOneSlicingArgument: Boolean = true +) on FIELD_DEFINITION + +directive @join__directive( + graphs: [join__Graph!] + name: String! + args: join__DirectiveArguments +) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + overrideLabel: String + contextArguments: [join__ContextArgument!] +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) 
on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__type( + graph: join__Graph! + key: join__FieldSet + extension: Boolean! = false + resolvable: Boolean! = true + isInterfaceObject: Boolean! = false +) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @renamedCost( + weight: Int! +) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @renamedListSize( + assumedSize: Int + slicingArguments: [String!] + sizedFields: [String!] + requireOneSlicingArgument: Boolean = true +) on FIELD_DEFINITION + +type A @join__type(graph: SUBGRAPHWITHLISTSIZE) { + id: ID +} + +enum AorB @join__type(graph: SUBGRAPHWITHCOST) @renamedCost(weight: 15) { + A @join__enumValue(graph: SUBGRAPHWITHCOST) + B @join__enumValue(graph: SUBGRAPHWITHCOST) +} + +scalar ExpensiveInt + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 30) + +type ExpensiveObject + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 40) { + id: ID +} + +input InputTypeWithCost @join__type(graph: SUBGRAPHWITHCOST) { + somethingWithCost: Int @renamedCost(weight: 20) +} + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPHWITHCOST + @join__graph(name: "subgraphWithCost", url: "http://localhost:4001") + SUBGRAPHWITHLISTSIZE + @join__graph(name: "subgraphWithListSize", url: "http://localhost:4002") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. 
+ """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPHWITHCOST) + @join__type(graph: SUBGRAPHWITHLISTSIZE) { + fieldWithCost: Int + @join__field(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 5) + argWithCost(arg: Int @renamedCost(weight: 10)): Int + @join__field(graph: SUBGRAPHWITHCOST) + enumWithCost: AorB @join__field(graph: SUBGRAPHWITHCOST) + inputWithCost(someInput: InputTypeWithCost): Int + @join__field(graph: SUBGRAPHWITHCOST) + scalarWithCost: ExpensiveInt @join__field(graph: SUBGRAPHWITHCOST) + objectWithCost: ExpensiveObject @join__field(graph: SUBGRAPHWITHCOST) + fieldWithListSize: [String!] + @join__field(graph: SUBGRAPHWITHLISTSIZE) + @renamedListSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int = 10): SizedField + @join__field(graph: SUBGRAPHWITHLISTSIZE) + @renamedListSize( + slicingArguments: ["first"] + sizedFields: ["items"] + requireOneSlicingArgument: true + ) +} + +type SizedField @join__type(graph: SUBGRAPHWITHLISTSIZE) { + items: [A] +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/mod.rs b/apollo-router/src/plugins/demand_control/cost_calculator/mod.rs index 290ce4dbe4..a534f91a94 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/mod.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/mod.rs @@ -1,4 +1,5 @@ mod directives; +pub(in crate::plugins::demand_control) mod schema; pub(crate) mod static_cost; use crate::plugins::demand_control::DemandControlError; diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs b/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs new file mode 100644 index 0000000000..6a46ee9fe9 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs @@ -0,0 +1,180 @@ +use std::ops::Deref; +use std::sync::Arc; + +use ahash::HashMap; +use 
ahash::HashMapExt; +use apollo_compiler::schema::ExtendedType; +use apollo_compiler::validation::Valid; +use apollo_compiler::Name; +use apollo_compiler::Schema; + +use super::directives::get_apollo_directive_names; +use super::directives::CostDirective; +use super::directives::DefinitionListSizeDirective as ListSizeDirective; +use super::directives::RequiresDirective; +use crate::plugins::demand_control::DemandControlError; + +pub(crate) struct DemandControlledSchema { + directive_name_map: HashMap, + inner: Arc>, + type_field_cost_directives: HashMap>, + type_field_list_size_directives: HashMap>, + type_field_requires_directives: HashMap>, +} + +impl DemandControlledSchema { + pub(crate) fn new(schema: Arc>) -> Result { + let directive_name_map = get_apollo_directive_names(&schema); + + let mut type_field_cost_directives: HashMap> = + HashMap::new(); + let mut type_field_list_size_directives: HashMap> = + HashMap::new(); + let mut type_field_requires_directives: HashMap> = + HashMap::new(); + + for (type_name, type_) in &schema.types { + let field_cost_directives = type_field_cost_directives + .entry(type_name.clone()) + .or_default(); + let field_list_size_directives = type_field_list_size_directives + .entry(type_name.clone()) + .or_default(); + let field_requires_directives = type_field_requires_directives + .entry(type_name.clone()) + .or_default(); + + match type_ { + ExtendedType::Interface(ty) => { + for field_name in ty.fields.keys() { + let field_definition = schema.type_field(type_name, field_name)?; + let field_type = schema.types.get(field_definition.ty.inner_named_type()).ok_or_else(|| { + DemandControlError::QueryParseFailure(format!( + "Field {} was found in query, but its type is missing from the schema.", + field_name + )) + })?; + + if let Some(cost_directive) = + CostDirective::from_field(&directive_name_map, field_definition) + .or(CostDirective::from_type(&directive_name_map, field_type)) + { + field_cost_directives.insert(field_name.clone(), 
cost_directive); + } + + if let Some(list_size_directive) = ListSizeDirective::from_field_definition( + &directive_name_map, + field_definition, + )? { + field_list_size_directives + .insert(field_name.clone(), list_size_directive); + } + + if let Some(requires_directive) = RequiresDirective::from_field_definition( + field_definition, + type_name, + &schema, + )? { + field_requires_directives + .insert(field_name.clone(), requires_directive); + } + } + } + ExtendedType::Object(ty) => { + for field_name in ty.fields.keys() { + let field_definition = schema.type_field(type_name, field_name)?; + let field_type = schema.types.get(field_definition.ty.inner_named_type()).ok_or_else(|| { + DemandControlError::QueryParseFailure(format!( + "Field {} was found in query, but its type is missing from the schema.", + field_name + )) + })?; + + if let Some(cost_directive) = + CostDirective::from_field(&directive_name_map, field_definition) + .or(CostDirective::from_type(&directive_name_map, field_type)) + { + field_cost_directives.insert(field_name.clone(), cost_directive); + } + + if let Some(list_size_directive) = ListSizeDirective::from_field_definition( + &directive_name_map, + field_definition, + )? { + field_list_size_directives + .insert(field_name.clone(), list_size_directive); + } + + if let Some(requires_directive) = RequiresDirective::from_field_definition( + field_definition, + type_name, + &schema, + )? 
{ + field_requires_directives + .insert(field_name.clone(), requires_directive); + } + } + } + _ => { + // Other types don't have fields + } + } + } + + Ok(Self { + directive_name_map, + inner: schema, + type_field_cost_directives, + type_field_list_size_directives, + type_field_requires_directives, + }) + } + + pub(in crate::plugins::demand_control) fn directive_name_map(&self) -> &HashMap { + &self.directive_name_map + } + + pub(in crate::plugins::demand_control) fn type_field_cost_directive( + &self, + type_name: &str, + field_name: &str, + ) -> Option<&CostDirective> { + self.type_field_cost_directives + .get(type_name)? + .get(field_name) + } + + pub(in crate::plugins::demand_control) fn type_field_list_size_directive( + &self, + type_name: &str, + field_name: &str, + ) -> Option<&ListSizeDirective> { + self.type_field_list_size_directives + .get(type_name)? + .get(field_name) + } + + pub(in crate::plugins::demand_control) fn type_field_requires_directive( + &self, + type_name: &str, + field_name: &str, + ) -> Option<&RequiresDirective> { + self.type_field_requires_directives + .get(type_name)? 
+ .get(field_name) + } +} + +impl AsRef> for DemandControlledSchema { + fn as_ref(&self) -> &Valid { + &self.inner + } +} + +impl Deref for DemandControlledSchema { + type Target = Schema; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs index 7601ba71e5..4f2e585db3 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use ahash::HashMap; +use apollo_compiler::ast; use apollo_compiler::ast::InputValueDefinition; use apollo_compiler::ast::NamedType; use apollo_compiler::executable::ExecutableDocument; @@ -9,18 +11,19 @@ use apollo_compiler::executable::InlineFragment; use apollo_compiler::executable::Operation; use apollo_compiler::executable::Selection; use apollo_compiler::executable::SelectionSet; -use apollo_compiler::validation::Valid; -use apollo_compiler::Schema; +use apollo_compiler::schema::ExtendedType; +use apollo_compiler::Node; use serde_json_bytes::Value; use super::directives::IncludeDirective; -use super::directives::RequiresDirective; use super::directives::SkipDirective; +use super::schema::DemandControlledSchema; use super::DemandControlError; use crate::graphql::Response; use crate::graphql::ResponseVisitor; +use crate::plugins::demand_control::cost_calculator::directives::CostDirective; +use crate::plugins::demand_control::cost_calculator::directives::ListSizeDirective; use crate::query_planner::fetch::SubgraphOperation; -use crate::query_planner::fetch::SubgraphSchemas; use crate::query_planner::DeferredNode; use crate::query_planner::PlanNode; use crate::query_planner::Primary; @@ -28,13 +31,74 @@ use crate::query_planner::QueryPlan; pub(crate) struct StaticCostCalculator { list_size: u32, - subgraph_schemas: Arc, + 
supergraph_schema: Arc, + subgraph_schemas: Arc>, +} + +fn score_argument( + argument: &apollo_compiler::ast::Value, + argument_definition: &Node, + schema: &DemandControlledSchema, +) -> Result { + let cost_directive = + CostDirective::from_argument(schema.directive_name_map(), argument_definition); + let ty = schema + .types + .get(argument_definition.ty.inner_named_type()) + .ok_or_else(|| { + DemandControlError::QueryParseFailure(format!( + "Argument {} was found in query, but its type ({}) was not found in the schema", + argument_definition.name, + argument_definition.ty.inner_named_type() + )) + })?; + + match (argument, ty) { + (_, ExtendedType::Interface(_)) + | (_, ExtendedType::Object(_)) + | (_, ExtendedType::Union(_)) => Err(DemandControlError::QueryParseFailure( + format!( + "Argument {} has type {}, but objects, interfaces, and unions are disallowed in this position", + argument_definition.name, + argument_definition.ty.inner_named_type() + ) + )), + + (ast::Value::Object(inner_args), ExtendedType::InputObject(inner_arg_defs)) => { + let mut cost = cost_directive.map_or(1.0, |cost| cost.weight()); + for (arg_name, arg_val) in inner_args { + let arg_def = inner_arg_defs.fields.get(arg_name).ok_or_else(|| { + DemandControlError::QueryParseFailure(format!( + "Argument {} was found in query, but its type ({}) was not found in the schema", + argument_definition.name, + argument_definition.ty.inner_named_type() + )) + })?; + cost += score_argument(arg_val, arg_def, schema)?; + } + Ok(cost) + } + (ast::Value::List(inner_args), _) => { + let mut cost = cost_directive.map_or(0.0, |cost| cost.weight()); + for arg_val in inner_args { + cost += score_argument(arg_val, argument_definition, schema)?; + } + Ok(cost) + } + (ast::Value::Null, _) => Ok(0.0), + _ => Ok(cost_directive.map_or(0.0, |cost| cost.weight())) + } } impl StaticCostCalculator { - pub(crate) fn new(subgraph_schemas: Arc, list_size: u32) -> Self { + pub(crate) fn new( + supergraph_schema: Arc, + 
subgraph_schemas: Arc>, + list_size: u32, + ) -> Self { Self { list_size, + supergraph_schema, subgraph_schemas, } } @@ -61,14 +125,18 @@ impl StaticCostCalculator { &self, field: &Field, parent_type: &NamedType, - schema: &Valid, + schema: &DemandControlledSchema, executable: &ExecutableDocument, should_estimate_requires: bool, + list_size_from_upstream: Option, ) -> Result { if StaticCostCalculator::skipped_by_directives(field) { return Ok(0.0); } + // We need to look up the `FieldDefinition` from the supergraph schema instead of using `field.definition` + // because `field.definition` was generated from the API schema, which strips off the directives we need. + let definition = schema.type_field(parent_type, &field.name)?; let ty = field.inner_type_def(schema).ok_or_else(|| { DemandControlError::QueryParseFailure(format!( "Field {} was found in query, but its type is missing from the schema.", @@ -76,17 +144,32 @@ impl StaticCostCalculator { )) })?; - // Determine how many instances we're scoring. If there's no user-provided - // information, assume lists have 100 items. - let instance_count = if field.ty().is_list() { - self.list_size as f64 + let list_size_directive = + match schema.type_field_list_size_directive(parent_type, &field.name) { + Some(dir) => dir.with_field(field).map(Some), + None => Ok(None), + }?; + let instance_count = if !field.ty().is_list() { + 1 + } else if let Some(value) = list_size_from_upstream { + // This is a sized field whose length is defined by the `@listSize` directive on the parent field + value + } else if let Some(expected_size) = list_size_directive + .as_ref() + .and_then(|dir| dir.expected_size) + { + expected_size } else { - 1.0 + self.list_size as i32 }; // Determine the cost for this particular field. Scalars are free, non-scalars are not. // For fields with selections, add in the cost of the selections as well. 
- let mut type_cost = if ty.is_interface() || ty.is_object() || ty.is_union() { + let mut type_cost = if let Some(cost_directive) = + schema.type_field_cost_directive(parent_type, &field.name) + { + cost_directive.weight() + } else if ty.is_interface() || ty.is_object() || ty.is_union() { 1.0 } else { 0.0 @@ -97,10 +180,19 @@ impl StaticCostCalculator { schema, executable, should_estimate_requires, + list_size_directive.as_ref(), )?; - for argument in &field.definition.arguments { - type_cost += Self::score_argument(argument, schema)?; + let mut arguments_cost = 0.0; + for argument in &field.arguments { + let argument_definition = + definition.argument_by_name(&argument.name).ok_or_else(|| { + DemandControlError::QueryParseFailure(format!( + "Argument {} of field {} is missing a definition in the schema", + argument.name, field.name + )) + })?; + arguments_cost += score_argument(&argument.value, argument_definition, schema)?; } let mut requirements_cost = 0.0; @@ -108,25 +200,28 @@ impl StaticCostCalculator { // If the field is marked with `@requires`, the required selection may not be included // in the query's selection. Adding that requirement's cost to the field ensures it's // accounted for. 
- let requirements = - RequiresDirective::from_field(field, parent_type, schema)?.map(|d| d.fields); + let requirements = schema + .type_field_requires_directive(parent_type, &field.name) + .map(|d| &d.fields); if let Some(selection_set) = requirements { requirements_cost = self.score_selection_set( - &selection_set, + selection_set, parent_type, schema, executable, should_estimate_requires, + list_size_directive.as_ref(), )?; } } - let cost = instance_count * type_cost + requirements_cost; + let cost = (instance_count as f64) * type_cost + arguments_cost + requirements_cost; tracing::debug!( - "Field {} cost breakdown: (count) {} * (type cost) {} + (requirements) {} = {}", + "Field {} cost breakdown: (count) {} * (type cost) {} + (arguments) {} + (requirements) {} = {}", field.name, instance_count, type_cost, + arguments_cost, requirements_cost, cost ); @@ -134,47 +229,14 @@ impl StaticCostCalculator { Ok(cost) } - fn score_argument( - argument: &InputValueDefinition, - schema: &Valid, - ) -> Result { - if let Some(ty) = schema.types.get(argument.ty.inner_named_type().as_str()) { - match ty { - apollo_compiler::schema::ExtendedType::InputObject(inner_arguments) => { - let mut cost = 1.0; - for inner_argument in inner_arguments.fields.values() { - cost += Self::score_argument(inner_argument, schema)?; - } - Ok(cost) - } - - apollo_compiler::schema::ExtendedType::Scalar(_) - | apollo_compiler::schema::ExtendedType::Enum(_) => Ok(0.0), - - apollo_compiler::schema::ExtendedType::Object(_) - | apollo_compiler::schema::ExtendedType::Interface(_) - | apollo_compiler::schema::ExtendedType::Union(_) => { - Err(DemandControlError::QueryParseFailure( - format!("Argument {} has type {}, but objects, interfaces, and unions are disallowed in this position", argument.name, argument.ty.inner_named_type()) - )) - } - } - } else { - Err(DemandControlError::QueryParseFailure(format!( - "Argument {} was found in query, but its type ({}) was not found in the schema", - argument.name, 
- argument.ty.inner_named_type() - ))) - } - } - fn score_fragment_spread( &self, fragment_spread: &FragmentSpread, parent_type: &NamedType, - schema: &Valid, + schema: &DemandControlledSchema, executable: &ExecutableDocument, should_estimate_requires: bool, + list_size_directive: Option<&ListSizeDirective>, ) -> Result { let fragment = fragment_spread.fragment_def(executable).ok_or_else(|| { DemandControlError::QueryParseFailure(format!( @@ -188,6 +250,7 @@ impl StaticCostCalculator { schema, executable, should_estimate_requires, + list_size_directive, ) } @@ -195,9 +258,10 @@ impl StaticCostCalculator { &self, inline_fragment: &InlineFragment, parent_type: &NamedType, - schema: &Valid, + schema: &DemandControlledSchema, executable: &ExecutableDocument, should_estimate_requires: bool, + list_size_directive: Option<&ListSizeDirective>, ) -> Result { self.score_selection_set( &inline_fragment.selection_set, @@ -205,13 +269,14 @@ impl StaticCostCalculator { schema, executable, should_estimate_requires, + list_size_directive, ) } fn score_operation( &self, operation: &Operation, - schema: &Valid, + schema: &DemandControlledSchema, executable: &ExecutableDocument, should_estimate_requires: bool, ) -> Result { @@ -230,6 +295,7 @@ impl StaticCostCalculator { schema, executable, should_estimate_requires, + None, )?; Ok(cost) @@ -239,20 +305,27 @@ impl StaticCostCalculator { &self, selection: &Selection, parent_type: &NamedType, - schema: &Valid, + schema: &DemandControlledSchema, executable: &ExecutableDocument, should_estimate_requires: bool, + list_size_directive: Option<&ListSizeDirective>, ) -> Result { match selection { - Selection::Field(f) => { - self.score_field(f, parent_type, schema, executable, should_estimate_requires) - } + Selection::Field(f) => self.score_field( + f, + parent_type, + schema, + executable, + should_estimate_requires, + list_size_directive.and_then(|dir| dir.size_of(f)), + ), Selection::FragmentSpread(s) => self.score_fragment_spread( s, 
parent_type, schema, executable, should_estimate_requires, + list_size_directive, ), Selection::InlineFragment(i) => self.score_inline_fragment( i, @@ -260,6 +333,7 @@ impl StaticCostCalculator { schema, executable, should_estimate_requires, + list_size_directive, ), } } @@ -268,9 +342,10 @@ impl StaticCostCalculator { &self, selection_set: &SelectionSet, parent_type_name: &NamedType, - schema: &Valid, + schema: &DemandControlledSchema, executable: &ExecutableDocument, should_estimate_requires: bool, + list_size_directive: Option<&ListSizeDirective>, ) -> Result { let mut cost = 0.0; for selection in selection_set.selections.iter() { @@ -280,6 +355,7 @@ impl StaticCostCalculator { schema, executable, should_estimate_requires, + list_size_directive, )?; } Ok(cost) @@ -386,7 +462,7 @@ impl StaticCostCalculator { pub(crate) fn estimated( &self, query: &ExecutableDocument, - schema: &Valid, + schema: &DemandControlledSchema, should_estimate_requires: bool, ) -> Result { let mut cost = 0.0; @@ -408,39 +484,75 @@ impl StaticCostCalculator { request: &ExecutableDocument, response: &Response, ) -> Result { - let mut visitor = ResponseCostCalculator::new(); + let mut visitor = ResponseCostCalculator::new(&self.supergraph_schema); visitor.visit(request, response); Ok(visitor.cost) } } -pub(crate) struct ResponseCostCalculator { +pub(crate) struct ResponseCostCalculator<'a> { pub(crate) cost: f64, + schema: &'a DemandControlledSchema, } -impl ResponseCostCalculator { - pub(crate) fn new() -> Self { - Self { cost: 0.0 } +impl<'schema> ResponseCostCalculator<'schema> { + pub(crate) fn new(schema: &'schema DemandControlledSchema) -> Self { + Self { cost: 0.0, schema } } } -impl ResponseVisitor for ResponseCostCalculator { +impl<'schema> ResponseVisitor for ResponseCostCalculator<'schema> { fn visit_field( &mut self, request: &ExecutableDocument, - _ty: &NamedType, + parent_ty: &NamedType, field: &Field, value: &Value, ) { + self.visit_list_item(request, parent_ty, field, value); 
+ + let definition = self.schema.type_field(parent_ty, &field.name); + for argument in &field.arguments { + if let Ok(Some(argument_definition)) = definition + .as_ref() + .map(|def| def.argument_by_name(&argument.name)) + { + if let Ok(score) = score_argument(&argument.value, argument_definition, self.schema) + { + self.cost += score; + } + } else { + tracing::warn!( + "Failed to get schema definition for argument {} of field {}. The resulting actual cost will be a partial result.", + argument.name, + field.name + ) + } + } + } + + fn visit_list_item( + &mut self, + request: &apollo_compiler::ExecutableDocument, + parent_ty: &apollo_compiler::executable::NamedType, + field: &apollo_compiler::executable::Field, + value: &Value, + ) { + let cost_directive = self + .schema + .type_field_cost_directive(parent_ty, &field.name); + match value { - Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_) => {} + Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_) => { + self.cost += cost_directive.map_or(0.0, |cost| cost.weight()); + } Value::Array(items) => { for item in items { - self.visit_field(request, field.ty().inner_named_type(), field, item); + self.visit_list_item(request, parent_ty, field, item); } } Value::Object(children) => { - self.cost += 1.0; + self.cost += cost_directive.map_or(1.0, |cost| cost.weight()); self.visit_selections(request, &field.selection_set, children); } } @@ -451,19 +563,26 @@ impl ResponseVisitor for ResponseCostCalculator { mod tests { use std::sync::Arc; + use ahash::HashMapExt; + use apollo_federation::query_plan::query_planner::QueryPlanner; use bytes::Bytes; use test_log::test; - use tower::Service; use super::*; - use crate::query_planner::BridgeQueryPlanner; use crate::services::layers::query_analysis::ParsedDocument; - use crate::services::QueryPlannerContent; - use crate::services::QueryPlannerRequest; use crate::spec; use crate::spec::Query; use crate::Configuration; - use crate::Context; + + impl 
StaticCostCalculator { + fn rust_planned( + &self, + query_plan: &apollo_federation::query_plan::QueryPlan, + ) -> Result { + let js_planner_node: PlanNode = query_plan.node.as_ref().unwrap().into(); + self.score_plan_node(&js_planner_node) + } + } fn parse_schema_and_operation( schema_str: &str, @@ -479,8 +598,12 @@ mod tests { fn estimated_cost(schema_str: &str, query_str: &str) -> f64 { let (schema, query) = parse_schema_and_operation(schema_str, query_str, &Default::default()); - StaticCostCalculator::new(Default::default(), 100) - .estimated(&query.executable, schema.supergraph_schema(), true) + let schema = + DemandControlledSchema::new(Arc::new(schema.supergraph_schema().clone())).unwrap(); + let calculator = StaticCostCalculator::new(Arc::new(schema), Default::default(), 100); + + calculator + .estimated(&query.executable, &calculator.supergraph_schema, true) .unwrap() } @@ -494,8 +617,11 @@ mod tests { "query.graphql", ) .unwrap(); - StaticCostCalculator::new(Default::default(), 100) - .estimated(&query, &schema, true) + let schema = DemandControlledSchema::new(Arc::new(schema)).unwrap(); + let calculator = StaticCostCalculator::new(Arc::new(schema), Default::default(), 100); + + calculator + .estimated(&query, &calculator.supergraph_schema, true) .unwrap() } @@ -503,40 +629,59 @@ mod tests { let config: Arc = Arc::new(Default::default()); let (schema, query) = parse_schema_and_operation(schema_str, query_str, &config); - let mut planner = BridgeQueryPlanner::new(schema.into(), config.clone(), None, None) - .await - .unwrap(); + let planner = + QueryPlanner::new(schema.federation_supergraph(), Default::default()).unwrap(); - let ctx = Context::new(); - ctx.extensions() - .with_lock(|mut lock| lock.insert::(query)); + let query_plan = planner.build_query_plan(&query.executable, None).unwrap(); - let planner_res = planner - .call(QueryPlannerRequest::new(query_str.to_string(), None, ctx)) - .await - .unwrap(); - let query_plan = match 
planner_res.content.unwrap() { - QueryPlannerContent::Plan { plan } => plan, - _ => panic!("Query planner returned unexpected non-plan content"), - }; + let schema = + DemandControlledSchema::new(Arc::new(schema.supergraph_schema().clone())).unwrap(); + let mut demand_controlled_subgraph_schemas = HashMap::new(); + for (subgraph_name, subgraph_schema) in planner.subgraph_schemas().iter() { + let demand_controlled_subgraph_schema = + DemandControlledSchema::new(Arc::new(subgraph_schema.schema().clone())).unwrap(); + demand_controlled_subgraph_schemas + .insert(subgraph_name.to_string(), demand_controlled_subgraph_schema); + } - let calculator = StaticCostCalculator { - subgraph_schemas: planner.subgraph_schemas(), - list_size: 100, - }; + let calculator = StaticCostCalculator::new( + Arc::new(schema), + Arc::new(demand_controlled_subgraph_schemas), + 100, + ); - calculator.planned(&query_plan).unwrap() + calculator.rust_planned(&query_plan).unwrap() } fn actual_cost(schema_str: &str, query_str: &str, response_bytes: &'static [u8]) -> f64 { - let (_schema, query) = + let (schema, query) = parse_schema_and_operation(schema_str, query_str, &Default::default()); let response = Response::from_bytes("test", Bytes::from(response_bytes)).unwrap(); - StaticCostCalculator::new(Default::default(), 100) + let schema = + DemandControlledSchema::new(Arc::new(schema.supergraph_schema().clone())).unwrap(); + StaticCostCalculator::new(Arc::new(schema), Default::default(), 100) .actual(&query.executable, &response) .unwrap() } + /// Actual cost of an operation on a plain, non-federated schema. 
+ fn basic_actual_cost(schema_str: &str, query_str: &str, response_bytes: &'static [u8]) -> f64 { + let schema = + apollo_compiler::Schema::parse_and_validate(schema_str, "schema.graphqls").unwrap(); + let query = apollo_compiler::ExecutableDocument::parse_and_validate( + &schema, + query_str, + "query.graphql", + ) + .unwrap(); + let response = Response::from_bytes("test", Bytes::from(response_bytes)).unwrap(); + + let schema = DemandControlledSchema::new(Arc::new(schema)).unwrap(); + StaticCostCalculator::new(Arc::new(schema), Default::default(), 100) + .actual(&query, &response) + .unwrap() + } + #[test] fn query_cost() { let schema = include_str!("./fixtures/basic_schema.graphql"); @@ -606,7 +751,18 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_input_object_query.graphql"); - assert_eq!(basic_estimated_cost(schema, query), 2.0) + assert_eq!(basic_estimated_cost(schema, query), 4.0) + } + + #[test] + fn input_object_cost_with_returned_objects() { + let schema = include_str!("./fixtures/basic_schema.graphql"); + let query = include_str!("./fixtures/basic_input_object_query_2.graphql"); + let response = include_bytes!("./fixtures/basic_input_object_response.json"); + + assert_eq!(basic_estimated_cost(schema, query), 104.0); + // The cost of the arguments from the query should be included when scoring the response + assert_eq!(basic_actual_cost(schema, query, response), 7.0); } #[test] @@ -684,15 +840,55 @@ mod tests { let schema = include_str!("./fixtures/federated_ships_schema.graphql"); let query = include_str!("./fixtures/federated_ships_deferred_query.graphql"); let (schema, query) = parse_schema_and_operation(schema, query, &Default::default()); + let schema = Arc::new( + DemandControlledSchema::new(Arc::new(schema.supergraph_schema().clone())).unwrap(), + ); - let conservative_estimate = StaticCostCalculator::new(Default::default(), 100) - .estimated(&query.executable, 
schema.supergraph_schema(), true) + let calculator = StaticCostCalculator::new(schema.clone(), Default::default(), 100); + let conservative_estimate = calculator + .estimated(&query.executable, &calculator.supergraph_schema, true) .unwrap(); - let narrow_estimate = StaticCostCalculator::new(Default::default(), 5) - .estimated(&query.executable, schema.supergraph_schema(), true) + + let calculator = StaticCostCalculator::new(schema.clone(), Default::default(), 5); + let narrow_estimate = calculator + .estimated(&query.executable, &calculator.supergraph_schema, true) .unwrap(); assert_eq!(conservative_estimate, 10200.0); assert_eq!(narrow_estimate, 35.0); } + + #[test(tokio::test)] + async fn custom_cost_query() { + let schema = include_str!("./fixtures/custom_cost_schema.graphql"); + let query = include_str!("./fixtures/custom_cost_query.graphql"); + let response = include_bytes!("./fixtures/custom_cost_response.json"); + + assert_eq!(estimated_cost(schema, query), 127.0); + assert_eq!(planned_cost(schema, query).await, 127.0); + assert_eq!(actual_cost(schema, query, response), 125.0); + } + + #[test(tokio::test)] + async fn custom_cost_query_with_renamed_directives() { + let schema = include_str!("./fixtures/custom_cost_schema_with_renamed_directives.graphql"); + let query = include_str!("./fixtures/custom_cost_query.graphql"); + let response = include_bytes!("./fixtures/custom_cost_response.json"); + + assert_eq!(estimated_cost(schema, query), 127.0); + assert_eq!(planned_cost(schema, query).await, 127.0); + assert_eq!(actual_cost(schema, query, response), 125.0); + } + + #[test(tokio::test)] + async fn custom_cost_query_with_default_slicing_argument() { + let schema = include_str!("./fixtures/custom_cost_schema.graphql"); + let query = + include_str!("./fixtures/custom_cost_query_with_default_slicing_argument.graphql"); + let response = include_bytes!("./fixtures/custom_cost_response.json"); + + assert_eq!(estimated_cost(schema, query), 132.0); + 
assert_eq!(planned_cost(schema, query).await, 132.0); + assert_eq!(actual_cost(schema, query, response), 125.0); + } } diff --git a/apollo-router/src/plugins/demand_control/mod.rs b/apollo-router/src/plugins/demand_control/mod.rs index 476deeb737..bf0cdf5f26 100644 --- a/apollo-router/src/plugins/demand_control/mod.rs +++ b/apollo-router/src/plugins/demand_control/mod.rs @@ -5,6 +5,9 @@ use std::future; use std::ops::ControlFlow; use std::sync::Arc; +use ahash::HashMap; +use ahash::HashMapExt; +use apollo_compiler::schema::FieldLookupError; use apollo_compiler::validation::Valid; use apollo_compiler::validation::WithErrors; use apollo_compiler::ExecutableDocument; @@ -27,6 +30,7 @@ use crate::json_ext::Object; use crate::layers::ServiceBuilderExt; use crate::plugin::Plugin; use crate::plugin::PluginInit; +use crate::plugins::demand_control::cost_calculator::schema::DemandControlledSchema; use crate::plugins::demand_control::strategy::Strategy; use crate::plugins::demand_control::strategy::StrategyFactory; use crate::register_plugin; @@ -199,6 +203,22 @@ impl From> for DemandControlError { } } +impl<'a> From> for DemandControlError { + fn from(value: FieldLookupError) -> Self { + match value { + FieldLookupError::NoSuchType => DemandControlError::QueryParseFailure( + "Attempted to look up a type which does not exist in the schema".to_string(), + ), + FieldLookupError::NoSuchField(type_name, _) => { + DemandControlError::QueryParseFailure(format!( + "Attempted to look up a field on type {}, but the field does not exist", + type_name + )) + } + } + } +} + pub(crate) struct DemandControl { config: DemandControlConfig, strategy_factory: StrategyFactory, @@ -223,11 +243,21 @@ impl Plugin for DemandControl { type Config = DemandControlConfig; async fn new(init: PluginInit) -> Result { + let demand_controlled_supergraph_schema = + DemandControlledSchema::new(init.supergraph_schema.clone())?; + let mut demand_controlled_subgraph_schemas = HashMap::new(); + for 
(subgraph_name, subgraph_schema) in init.subgraph_schemas.iter() { + let demand_controlled_subgraph_schema = + DemandControlledSchema::new(subgraph_schema.clone())?; + demand_controlled_subgraph_schemas + .insert(subgraph_name.clone(), demand_controlled_subgraph_schema); + } + Ok(DemandControl { strategy_factory: StrategyFactory::new( init.config.clone(), - init.supergraph_schema.clone(), - init.subgraph_schemas.clone(), + Arc::new(demand_controlled_supergraph_schema), + Arc::new(demand_controlled_subgraph_schemas), ), config: init.config, }) diff --git a/apollo-router/src/plugins/demand_control/strategy/mod.rs b/apollo-router/src/plugins/demand_control/strategy/mod.rs index 5defca64d5..6bae126694 100644 --- a/apollo-router/src/plugins/demand_control/strategy/mod.rs +++ b/apollo-router/src/plugins/demand_control/strategy/mod.rs @@ -1,11 +1,10 @@ -use std::collections::HashMap; use std::sync::Arc; -use apollo_compiler::validation::Valid; +use ahash::HashMap; use apollo_compiler::ExecutableDocument; -use apollo_compiler::Schema; use crate::graphql; +use crate::plugins::demand_control::cost_calculator::schema::DemandControlledSchema; use crate::plugins::demand_control::cost_calculator::static_cost::StaticCostCalculator; use crate::plugins::demand_control::strategy::static_estimated::StaticEstimated; use crate::plugins::demand_control::DemandControlConfig; @@ -75,15 +74,15 @@ impl Strategy { pub(crate) struct StrategyFactory { config: DemandControlConfig, #[allow(dead_code)] - supergraph_schema: Arc>, - subgraph_schemas: Arc>>>, + supergraph_schema: Arc, + subgraph_schemas: Arc>, } impl StrategyFactory { pub(crate) fn new( config: DemandControlConfig, - supergraph_schema: Arc>, - subgraph_schemas: Arc>>>, + supergraph_schema: Arc, + subgraph_schemas: Arc>, ) -> Self { Self { config, @@ -97,6 +96,7 @@ impl StrategyFactory { StrategyConfig::StaticEstimated { list_size, max } => Arc::new(StaticEstimated { max: *max, cost_calculator: StaticCostCalculator::new( + 
self.supergraph_schema.clone(), self.subgraph_schemas.clone(), *list_size, ), From fe8cce89dad712ab746bbabbfe420a032c9939a2 Mon Sep 17 00:00:00 2001 From: Dylan Anthony Date: Tue, 20 Aug 2024 12:08:51 -0600 Subject: [PATCH 085/108] Remove actix dependencies by switching fuzz subgraph to axum (#5848) --- Cargo.lock | 394 ++++---------------------------------- fuzz/subgraph/Cargo.toml | 9 +- fuzz/subgraph/src/main.rs | 69 ++----- 3 files changed, 64 insertions(+), 408 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11a516f5dc..55ca449bc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,242 +24,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "actix" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de7fa236829ba0841304542f7614c42b80fca007455315c45c785ccfa873a85b" -dependencies = [ - "actix-macros", - "actix-rt", - "actix_derive", - "bitflags 2.6.0", - "bytes", - "crossbeam-channel", - "futures-core", - "futures-sink", - "futures-task", - "futures-util", - "log", - "once_cell", - "parking_lot", - "pin-project-lite", - "smallvec", - "tokio", - "tokio-util", -] - -[[package]] -name = "actix-codec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" -dependencies = [ - "bitflags 2.6.0", - "bytes", - "futures-core", - "futures-sink", - "memchr", - "pin-project-lite", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "actix-http" -version = "3.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae682f693a9cd7b058f2b0b5d9a6d7728a8555779bedbbc35dd88528611d020" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-utils", - "ahash", - "base64 0.22.1", - "bitflags 2.6.0", - "brotli 6.0.0", - "bytes", - "bytestring", - "derive_more", - "encoding_rs", - "flate2", - "futures-core", - "h2", - "http 0.2.12", - "httparse", - "httpdate", 
- "itoa", - "language-tags", - "local-channel", - "mime", - "percent-encoding", - "pin-project-lite", - "rand 0.8.5", - "sha1", - "smallvec", - "tokio", - "tokio-util", - "tracing", - "zstd", -] - -[[package]] -name = "actix-macros" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" -dependencies = [ - "quote", - "syn 2.0.71", -] - -[[package]] -name = "actix-router" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d324164c51f63867b57e73ba5936ea151b8a41a1d23d1031eeb9f70d0236f8" -dependencies = [ - "bytestring", - "cfg-if 1.0.0", - "http 0.2.12", - "regex", - "regex-lite", - "serde", - "tracing", -] - -[[package]] -name = "actix-rt" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" -dependencies = [ - "futures-core", - "tokio", -] - -[[package]] -name = "actix-server" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b02303ce8d4e8be5b855af6cf3c3a08f3eff26880faad82bab679c22d3650cb5" -dependencies = [ - "actix-rt", - "actix-service", - "actix-utils", - "futures-core", - "futures-util", - "mio", - "socket2 0.5.7", - "tokio", - "tracing", -] - -[[package]] -name = "actix-service" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" -dependencies = [ - "futures-core", - "paste", - "pin-project-lite", -] - -[[package]] -name = "actix-utils" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" -dependencies = [ - "local-waker", - "pin-project-lite", -] - -[[package]] -name = "actix-web" -version = "4.8.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1988c02af8d2b718c05bc4aeb6a66395b7cdf32858c2c71131e5637a8c05a9ff" -dependencies = [ - "actix-codec", - "actix-http", - "actix-macros", - "actix-router", - "actix-rt", - "actix-server", - "actix-service", - "actix-utils", - "actix-web-codegen", - "ahash", - "bytes", - "bytestring", - "cfg-if 1.0.0", - "cookie 0.16.2", - "derive_more", - "encoding_rs", - "futures-core", - "futures-util", - "itoa", - "language-tags", - "log", - "mime", - "once_cell", - "pin-project-lite", - "regex", - "regex-lite", - "serde", - "serde_json", - "serde_urlencoded", - "smallvec", - "socket2 0.5.7", - "time", - "url", -] - -[[package]] -name = "actix-web-actors" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420b001bb709d8510c3e2659dae046e54509ff9528018d09c78381e765a1f9fa" -dependencies = [ - "actix", - "actix-codec", - "actix-http", - "actix-web", - "bytes", - "bytestring", - "futures-core", - "pin-project-lite", - "tokio", - "tokio-util", -] - -[[package]] -name = "actix-web-codegen" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f591380e2e68490b5dfaf1dd1aa0ebe78d84ba7067078512b4ea6e4492d622b8" -dependencies = [ - "actix-router", - "proc-macro2", - "quote", - "syn 2.0.71", -] - -[[package]] -name = "actix_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c7db3d5a9718568e4cf4a537cfd7070e6e6ff7481510d0237fb529ac850f6d3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.71", -] - [[package]] name = "add-timestamp-header" version = "0.1.0" @@ -494,7 +258,7 @@ dependencies = [ "clap", "console", "console-subscriber", - "cookie 0.18.1", + "cookie", "crossbeam-channel", "dashmap", "derivative", @@ -869,9 +633,9 @@ dependencies = [ [[package]] name = "async-graphql" -version = "5.0.10" +version = "6.0.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35ef8f9be23ee30fe1eb1cf175c689bc33517c6c6d0fd0669dade611e5ced7f" +checksum = "298a5d587d6e6fdb271bf56af2dc325a80eb291fd0fc979146584b9a05494a8c" dependencies = [ "async-graphql-derive", "async-graphql-parser", @@ -885,7 +649,7 @@ dependencies = [ "futures-util", "handlebars 4.5.0", "http 0.2.12", - "indexmap 1.9.3", + "indexmap 2.2.6", "mime", "multer", "num-traits", @@ -901,28 +665,28 @@ dependencies = [ ] [[package]] -name = "async-graphql-actix-web" -version = "5.0.10" +name = "async-graphql-axum" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75e3d335639e722213bdd120f77a66f531bde8bbcff1b19ab8e542f82aed7f48" +checksum = "01a1c20a2059bffbc95130715b23435a05168c518fba9709c81fa2a38eed990c" dependencies = [ - "actix", - "actix-http", - "actix-web", - "actix-web-actors", - "async-channel 1.9.0", "async-graphql", - "futures-channel", + "async-trait", + "axum", + "bytes", "futures-util", "serde_json", - "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tower-service", ] [[package]] name = "async-graphql-derive" -version = "5.0.10" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a0f6ceed3640b4825424da70a5107e79d48d9b2bc6318dfc666b2fc4777f8c4" +checksum = "c7f329c7eb9b646a72f70c9c4b516c70867d356ec46cb00dcac8ad343fd006b0" dependencies = [ "Inflector", "async-graphql-parser", @@ -930,15 +694,16 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "strum 0.25.0", + "syn 2.0.71", "thiserror", ] [[package]] name = "async-graphql-parser" -version = "5.0.10" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc308cd3bc611ee86c9cf19182d2b5ee583da40761970e41207f088be3db18f" +checksum = "6139181845757fd6a73fbb8839f3d036d7150b798db0e9bb3c6e83cdd65bd53b" dependencies = [ "async-graphql-value", "pest", @@ -948,12 +713,12 @@ 
dependencies = [ [[package]] name = "async-graphql-value" -version = "5.0.10" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d461325bfb04058070712296601dfe5e5bd6cdff84780a0a8c569ffb15c87eb3" +checksum = "323a5143f5bdd2030f45e3f2e0c821c9b1d36e79cf382129c64299c50a7f3750" dependencies = [ "bytes", - "indexmap 1.9.3", + "indexmap 2.2.6", "serde", "serde_json", ] @@ -1124,17 +889,6 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "auth-git2" version = "0.5.4" @@ -1747,15 +1501,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bytestring" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" -dependencies = [ - "bytes", -] - [[package]] name = "cache-control" version = "0.1.0" @@ -1906,7 +1651,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim", ] [[package]] @@ -2078,17 +1823,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" -[[package]] -name = "cookie" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" -dependencies = [ - "percent-encoding", - "time", - "version_check", -] - [[package]] name = "cookie" version = "0.18.1" @@ -2323,9 +2057,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.4" +version = "0.20.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -2333,27 +2067,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 1.0.109", + "strsim", + "syn 2.0.71", ] [[package]] name = "darling_macro" -version = "0.14.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 1.0.109", + "syn 2.0.71", ] [[package]] @@ -2789,19 +2523,6 @@ dependencies = [ "syn 2.0.71", ] -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.10.2" @@ -2907,10 +2628,10 @@ dependencies = [ name = "everything-subgraph" version = "0.1.0" dependencies = [ - "actix-web", "async-graphql", - "async-graphql-actix-web", - "env_logger 0.9.3", + "async-graphql-axum", + "axum", + "env_logger", "futures", "lazy_static", "log", @@ -2918,6 +2639,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tokio", + "tower", ] [[package]] @@ -3599,15 +3321,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.3.9" @@ -3893,7 +3606,6 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", - "serde", ] [[package]] @@ -4200,12 +3912,6 @@ dependencies = [ "log", ] -[[package]] -name = "language-tags" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" - [[package]] name = "lazy-regex" version = "2.5.0" @@ -4371,23 +4077,6 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" -[[package]] -name = "local-channel" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6cbc85e69b8df4b8bb8b89ec634e7189099cea8927a276b7384ce5488e53ec8" -dependencies = [ - "futures-core", - "futures-sink", - "local-waker", -] - -[[package]] -name = "local-waker" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" - [[package]] name = "lock_api" version = "0.4.12" @@ -6124,7 +5813,7 @@ dependencies = [ "apollo-router", "apollo-smith", "async-trait", - "env_logger 0.10.2", + "env_logger", "http 0.2.12", "libfuzzer-sys", "log", @@ -6837,12 +6526,6 @@ dependencies = [ "regex", ] -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -7335,6 +7018,7 @@ checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", 
"futures-core", + "futures-io", "futures-sink", "pin-project-lite", "slab", diff --git a/fuzz/subgraph/Cargo.toml b/fuzz/subgraph/Cargo.toml index a6be7c3c72..e04f35066e 100644 --- a/fuzz/subgraph/Cargo.toml +++ b/fuzz/subgraph/Cargo.toml @@ -4,10 +4,10 @@ version = "0.1.0" edition = "2021" [dependencies] -actix-web = { version = "4", features = ["default"] } -async-graphql = "5" -async-graphql-actix-web = "5" -env_logger = "0.9.0" +axum = "0.6.20" +async-graphql = "6" +async-graphql-axum = "6" +env_logger = "0.10" futures = "0.3.17" lazy_static = "1.4.0" log = "0.4.16" @@ -15,3 +15,4 @@ moka = { version = "0.8.5", features = ["future"] } rand = { version = "0.8.5", features = ["std_rng"] } serde_json = "1.0.79" tokio = { version = "1.22.0", features = ["time", "full"] } +tower = "0.4.0" diff --git a/fuzz/subgraph/src/main.rs b/fuzz/subgraph/src/main.rs index d6e497a9a6..0be9550c7c 100644 --- a/fuzz/subgraph/src/main.rs +++ b/fuzz/subgraph/src/main.rs @@ -1,66 +1,37 @@ -use std::time::Duration; - -use actix_web::get; -use actix_web::post; -use actix_web::web; -use actix_web::web::Data; -use actix_web::App; -use actix_web::HttpResponse; -use actix_web::HttpServer; -use actix_web::Result; -use async_graphql::http::playground_source; -use async_graphql::http::GraphQLPlaygroundConfig; use async_graphql::EmptySubscription; -use async_graphql::Schema; -use async_graphql_actix_web::GraphQLRequest; +use async_graphql_axum::GraphQLRequest; +use async_graphql_axum::GraphQLResponse; +use axum::routing::post; +use axum::Extension; +use axum::Router; +use tower::ServiceBuilder; use crate::model::Mutation; use crate::model::Query; mod model; -#[post("/")] -async fn index( - schema: web::Data>, - mut req: GraphQLRequest, -) -> HttpResponse { +type Schema = async_graphql::Schema; + +async fn graphql_handler(schema: Extension, mut req: GraphQLRequest) -> GraphQLResponse { //Zero out the random variable req.0.variables.remove(&async_graphql::Name::new("random")); println!("query: 
{}", req.0.query); - - let response = schema.execute(req.into_inner()).await; - let response_json = serde_json::to_string(&response).unwrap(); - - HttpResponse::Ok() - .content_type("application/json") - .body(response_json) -} - -#[get("*")] -async fn index_playground() -> Result { - Ok(HttpResponse::Ok() - .content_type("text/html; charset=utf-8") - .body(playground_source( - GraphQLPlaygroundConfig::new("/").subscription_endpoint("/"), - ))) + schema.execute(req.into_inner()).await.into() } #[tokio::main] -async fn main() -> std::io::Result<()> { +async fn main() { env_logger::init(); println!("about to listen to http://localhost:4005"); - HttpServer::new(move || { - let schema = Schema::build(Query, Mutation, EmptySubscription).finish(); - App::new() - .app_data(Data::new(schema)) - //.wrap(EnsureKeepAlive) - //.wrap(DelayFor::default()) - .service(index) - .service(index_playground) - }) - .keep_alive(Duration::from_secs(75)) - .bind("0.0.0.0:4005")? - .run() - .await + let schema = Schema::build(Query, Mutation, EmptySubscription).finish(); + let router = Router::new() + .route("/", post(graphql_handler)) + .layer(ServiceBuilder::new().layer(Extension(schema))); + + axum::Server::bind(&"0.0.0.0:4005".parse().expect("Fixed address is valid")) + .serve(router.into_make_service()) + .await + .expect("Server failed to start") } From 03f1d8365ade06b6e779bd498ab64f13eb1edae4 Mon Sep 17 00:00:00 2001 From: Dylan Anthony Date: Tue, 20 Aug 2024 12:27:29 -0600 Subject: [PATCH 086/108] Split up `extract_subgraphs_from_supergraph` module (#5856) --- apollo-federation/src/lib.rs | 10 +- .../src/query_graph/build_query_graph.rs | 2 +- apollo-federation/src/query_graph/mod.rs | 1 - .../src/query_plan/fetch_dependency_graph.rs | 4 +- .../mod.rs} | 215 +----------------- apollo-federation/src/supergraph/schema.rs | 109 +++++++++ apollo-federation/src/supergraph/subgraph.rs | 107 +++++++++ 7 files changed, 232 insertions(+), 216 deletions(-) rename 
apollo-federation/src/{query_graph/extract_subgraphs_from_supergraph.rs => supergraph/mod.rs} (94%) create mode 100644 apollo-federation/src/supergraph/schema.rs create mode 100644 apollo-federation/src/supergraph/subgraph.rs diff --git a/apollo-federation/src/lib.rs b/apollo-federation/src/lib.rs index 4eb4afa5be..ed02b15ada 100644 --- a/apollo-federation/src/lib.rs +++ b/apollo-federation/src/lib.rs @@ -29,6 +29,7 @@ pub mod query_graph; pub mod query_plan; pub mod schema; pub mod subgraph; +pub(crate) mod supergraph; pub(crate) mod utils; use apollo_compiler::ast::NamedType; @@ -46,10 +47,10 @@ use crate::link::spec::Identity; use crate::link::spec_definition::SpecDefinitions; use crate::merge::merge_subgraphs; use crate::merge::MergeFailure; -pub use crate::query_graph::extract_subgraphs_from_supergraph::ValidFederationSubgraph; -pub use crate::query_graph::extract_subgraphs_from_supergraph::ValidFederationSubgraphs; use crate::schema::ValidFederationSchema; use crate::subgraph::ValidSubgraph; +pub use crate::supergraph::ValidFederationSubgraph; +pub use crate::supergraph::ValidFederationSubgraphs; pub(crate) type SupergraphSpecs = (&'static LinkSpecDefinition, &'static JoinSpecDefinition); @@ -128,10 +129,7 @@ impl Supergraph { } pub fn extract_subgraphs(&self) -> Result { - crate::query_graph::extract_subgraphs_from_supergraph::extract_subgraphs_from_supergraph( - &self.schema, - None, - ) + supergraph::extract_subgraphs_from_supergraph(&self.schema, None) } } diff --git a/apollo-federation/src/query_graph/build_query_graph.rs b/apollo-federation/src/query_graph/build_query_graph.rs index 8aca65e9e0..3dd7abbcd6 100644 --- a/apollo-federation/src/query_graph/build_query_graph.rs +++ b/apollo-federation/src/query_graph/build_query_graph.rs @@ -21,7 +21,6 @@ use crate::link::federation_spec_definition::KeyDirectiveArguments; use crate::operation::merge_selection_sets; use crate::operation::Selection; use crate::operation::SelectionSet; -use 
crate::query_graph::extract_subgraphs_from_supergraph::extract_subgraphs_from_supergraph; use crate::query_graph::QueryGraph; use crate::query_graph::QueryGraphEdge; use crate::query_graph::QueryGraphEdgeTransition; @@ -41,6 +40,7 @@ use crate::schema::position::SchemaRootDefinitionPosition; use crate::schema::position::TypeDefinitionPosition; use crate::schema::position::UnionTypeDefinitionPosition; use crate::schema::ValidFederationSchema; +use crate::supergraph::extract_subgraphs_from_supergraph; /// Builds a "federated" query graph based on the provided supergraph and API schema. /// diff --git a/apollo-federation/src/query_graph/mod.rs b/apollo-federation/src/query_graph/mod.rs index e77d191efa..15e83f49f9 100644 --- a/apollo-federation/src/query_graph/mod.rs +++ b/apollo-federation/src/query_graph/mod.rs @@ -30,7 +30,6 @@ use crate::schema::ValidFederationSchema; pub mod build_query_graph; pub(crate) mod condition_resolver; -pub(crate) mod extract_subgraphs_from_supergraph; pub(crate) mod graph_path; pub mod output; pub(crate) mod path_tree; diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index b1bbab0ebf..dfc862e23b 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -45,8 +45,6 @@ use crate::operation::SelectionMap; use crate::operation::SelectionSet; use crate::operation::VariableCollector; use crate::operation::TYPENAME_FIELD; -use crate::query_graph::extract_subgraphs_from_supergraph::FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME; -use crate::query_graph::extract_subgraphs_from_supergraph::FEDERATION_REPRESENTATIONS_VAR_NAME; use crate::query_graph::graph_path::concat_op_paths; use crate::query_graph::graph_path::concat_paths_in_parents; use crate::query_graph::graph_path::OpGraphPathContext; @@ -75,6 +73,8 @@ use crate::schema::position::TypeDefinitionPosition; use 
crate::schema::ValidFederationSchema; use crate::subgraph::spec::ANY_SCALAR_NAME; use crate::subgraph::spec::ENTITIES_QUERY; +use crate::supergraph::FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME; +use crate::supergraph::FEDERATION_REPRESENTATIONS_VAR_NAME; use crate::utils::logging::snapshot; /// Represents the value of a `@defer(label:)` argument. diff --git a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs b/apollo-federation/src/supergraph/mod.rs similarity index 94% rename from apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs rename to apollo-federation/src/supergraph/mod.rs index 5fc428e38b..66d6078871 100644 --- a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs +++ b/apollo-federation/src/supergraph/mod.rs @@ -1,5 +1,6 @@ -use std::collections::BTreeMap; -use std::fmt; +mod schema; +mod subgraph; + use std::fmt::Write; use std::ops::Deref; use std::sync::Arc; @@ -27,7 +28,6 @@ use apollo_compiler::schema::InterfaceType; use apollo_compiler::schema::NamedType; use apollo_compiler::schema::ObjectType; use apollo_compiler::schema::ScalarType; -use apollo_compiler::schema::SchemaBuilder; use apollo_compiler::schema::Type; use apollo_compiler::schema::UnionType; use apollo_compiler::validation::Valid; @@ -37,6 +37,12 @@ use itertools::Itertools; use lazy_static::lazy_static; use time::OffsetDateTime; +use self::schema::get_apollo_directive_names; +pub(crate) use self::schema::new_empty_fed_2_subgraph_schema; +use self::subgraph::FederationSubgraph; +use self::subgraph::FederationSubgraphs; +pub use self::subgraph::ValidFederationSubgraph; +pub use self::subgraph::ValidFederationSubgraphs; use crate::error::FederationError; use crate::error::MultipleFederationErrors; use crate::error::SingleFederationError; @@ -49,9 +55,7 @@ use crate::link::join_spec_definition::JoinSpecDefinition; use crate::link::join_spec_definition::TypeDirectiveArguments; use crate::link::spec::Identity; use 
crate::link::spec::Version; -use crate::link::spec::APOLLO_SPEC_DOMAIN; use crate::link::spec_definition::SpecDefinition; -use crate::link::Link; use crate::link::DEFAULT_LINK_NAME; use crate::schema::field_set::parse_field_set_without_normalization; use crate::schema::position::is_graphql_reserved_name; @@ -76,7 +80,6 @@ use crate::schema::type_and_directive_specification::ScalarTypeSpecification; use crate::schema::type_and_directive_specification::TypeAndDirectiveSpecification; use crate::schema::type_and_directive_specification::UnionTypeSpecification; use crate::schema::FederationSchema; -use crate::schema::ValidFederationSchema; /// Assumes the given schema has been validated. /// @@ -230,70 +233,6 @@ fn collect_empty_subgraphs( )) } -/// TODO: Use the JS/programmatic approach instead of hard-coding definitions. -pub(crate) fn new_empty_fed_2_subgraph_schema() -> Result { - let builder = SchemaBuilder::new().adopt_orphan_extensions(); - let builder = builder.parse( - r#" - extend schema - @link(url: "https://specs.apollo.dev/link/v1.0") - @link(url: "https://specs.apollo.dev/federation/v2.9") - - directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA - - scalar link__Import - - enum link__Purpose { - """ - \`SECURITY\` features provide metadata necessary to securely resolve fields. - """ - SECURITY - - """ - \`EXECUTION\` features provide metadata necessary for operation execution. - """ - EXECUTION - } - - directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE - - directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION - - directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION - - directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION - - directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA - - directive @federation__extends on OBJECT | INTERFACE - - directive @federation__shareable on OBJECT | FIELD_DEFINITION - - directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION - - directive @federation__override(from: String!, label: String) on FIELD_DEFINITION - - directive @federation__composeDirective(name: String) repeatable on SCHEMA - - directive @federation__interfaceObject on OBJECT - - directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM - - directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM - - directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR - - directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION - - scalar federation__FieldSet - - scalar federation__Scope - "#, - "subgraph.graphql", - ); - FederationSchema::new(builder.build()?) -} - struct TypeInfo { name: NamedType, // IndexMap @@ -308,43 +247,6 @@ struct TypeInfos { input_object_types: Vec, } -/// Builds a map of original name to new name for Apollo feature directives. This is -/// used to handle cases where a directive is renamed via an import statement. For -/// example, importing a directive with a custom name like -/// ```graphql -/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: [{ name: "@cost", as: "@renamedCost" }]) -/// ``` -/// results in a map entry of `cost -> renamedCost` with the `@` prefix removed. 
-/// -/// If the directive is imported under its default name, that also results in an entry. So, -/// ```graphql -/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@cost"]) -/// ``` -/// results in a map entry of `cost -> cost`. This duals as a way to check if a directive -/// is included in the supergraph schema. -/// -/// **Important:** This map does _not_ include directives imported from identities other -/// than `specs.apollo.dev`. This helps us avoid extracting directives to subgraphs -/// when a custom directive's name conflicts with that of a default one. -fn get_apollo_directive_names( - supergraph_schema: &FederationSchema, -) -> Result, FederationError> { - let mut hm: IndexMap = IndexMap::default(); - for directive in &supergraph_schema.schema().schema_definition.directives { - if directive.name.as_str() == "link" { - if let Ok(link) = Link::from_directive_application(directive) { - if link.url.identity.domain != APOLLO_SPEC_DOMAIN { - continue; - } - for import in link.imports { - hm.insert(import.element.clone(), import.imported_name().clone()); - } - } - } - } - Ok(hm) -} - fn extract_subgraphs_from_fed_2_supergraph( supergraph_schema: &FederationSchema, subgraphs: &mut FederationSubgraphs, @@ -1654,105 +1556,6 @@ fn get_subgraph<'subgraph>( }) } -struct FederationSubgraph { - name: String, - url: String, - schema: FederationSchema, -} - -struct FederationSubgraphs { - subgraphs: BTreeMap, -} - -impl FederationSubgraphs { - fn new() -> Self { - FederationSubgraphs { - subgraphs: BTreeMap::new(), - } - } - - fn add(&mut self, subgraph: FederationSubgraph) -> Result<(), FederationError> { - if self.subgraphs.contains_key(&subgraph.name) { - return Err(SingleFederationError::InvalidFederationSupergraph { - message: format!("A subgraph named \"{}\" already exists", subgraph.name), - } - .into()); - } - self.subgraphs.insert(subgraph.name.clone(), subgraph); - Ok(()) - } - - fn get(&self, name: &str) -> Option<&FederationSubgraph> { - 
self.subgraphs.get(name) - } - - fn get_mut(&mut self, name: &str) -> Option<&mut FederationSubgraph> { - self.subgraphs.get_mut(name) - } -} - -impl IntoIterator for FederationSubgraphs { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.subgraphs.into_iter() - } -} - -// TODO(@goto-bus-stop): consider an appropriate name for this in the public API -// TODO(@goto-bus-stop): should this exist separately from the `crate::subgraph::Subgraph` type? -#[derive(Debug, Clone)] -pub struct ValidFederationSubgraph { - pub name: String, - pub url: String, - pub schema: ValidFederationSchema, -} - -pub struct ValidFederationSubgraphs { - subgraphs: BTreeMap, ValidFederationSubgraph>, -} - -impl fmt::Debug for ValidFederationSubgraphs { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("ValidFederationSubgraphs ")?; - f.debug_map().entries(self.subgraphs.iter()).finish() - } -} - -impl ValidFederationSubgraphs { - pub(crate) fn new() -> Self { - ValidFederationSubgraphs { - subgraphs: BTreeMap::new(), - } - } - - pub(crate) fn add(&mut self, subgraph: ValidFederationSubgraph) -> Result<(), FederationError> { - if self.subgraphs.contains_key(subgraph.name.as_str()) { - return Err(SingleFederationError::InvalidFederationSupergraph { - message: format!("A subgraph named \"{}\" already exists", subgraph.name), - } - .into()); - } - self.subgraphs - .insert(subgraph.name.as_str().into(), subgraph); - Ok(()) - } - - pub fn get(&self, name: &str) -> Option<&ValidFederationSubgraph> { - self.subgraphs.get(name) - } -} - -impl IntoIterator for ValidFederationSubgraphs { - type Item = , ValidFederationSubgraph> as IntoIterator>::Item; - type IntoIter = , ValidFederationSubgraph> as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.subgraphs.into_iter() - } -} - lazy_static! 
{ static ref EXECUTABLE_DIRECTIVE_LOCATIONS: IndexSet = { [ diff --git a/apollo-federation/src/supergraph/schema.rs b/apollo-federation/src/supergraph/schema.rs new file mode 100644 index 0000000000..589131f633 --- /dev/null +++ b/apollo-federation/src/supergraph/schema.rs @@ -0,0 +1,109 @@ +use apollo_compiler::collections::IndexMap; +use apollo_compiler::schema::SchemaBuilder; +use apollo_compiler::Name; + +use crate::error::FederationError; +use crate::link::spec::APOLLO_SPEC_DOMAIN; +use crate::link::Link; +use crate::schema::FederationSchema; + +/// Builds a map of original name to new name for Apollo feature directives. This is +/// used to handle cases where a directive is renamed via an import statement. For +/// example, importing a directive with a custom name like +/// ```graphql +/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: [{ name: "@cost", as: "@renamedCost" }]) +/// ``` +/// results in a map entry of `cost -> renamedCost` with the `@` prefix removed. +/// +/// If the directive is imported under its default name, that also results in an entry. So, +/// ```graphql +/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@cost"]) +/// ``` +/// results in a map entry of `cost -> cost`. This duals as a way to check if a directive +/// is included in the supergraph schema. +/// +/// **Important:** This map does _not_ include directives imported from identities other +/// than `specs.apollo.dev`. This helps us avoid extracting directives to subgraphs +/// when a custom directive's name conflicts with that of a default one. 
+pub(super) fn get_apollo_directive_names( + supergraph_schema: &FederationSchema, +) -> Result, FederationError> { + let mut hm: IndexMap = IndexMap::default(); + for directive in &supergraph_schema.schema().schema_definition.directives { + if directive.name.as_str() == "link" { + if let Ok(link) = Link::from_directive_application(directive) { + if link.url.identity.domain != APOLLO_SPEC_DOMAIN { + continue; + } + for import in link.imports { + hm.insert(import.element.clone(), import.imported_name().clone()); + } + } + } + } + Ok(hm) +} + +/// TODO: Use the JS/programmatic approach instead of hard-coding definitions. +pub(crate) fn new_empty_fed_2_subgraph_schema() -> Result { + let builder = SchemaBuilder::new().adopt_orphan_extensions(); + let builder = builder.parse( + r#" + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/federation/v2.9") + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + scalar link__Import + + enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + + directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + + directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + + directive @federation__extends on OBJECT | INTERFACE + + directive @federation__shareable on OBJECT | FIELD_DEFINITION + + directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + + directive @federation__composeDirective(name: String) repeatable on SCHEMA + + directive @federation__interfaceObject on OBJECT + + directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + + directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + + directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + + directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + scalar federation__FieldSet + + scalar federation__Scope + "#, + "subgraph.graphql", + ); + FederationSchema::new(builder.build()?) 
+} diff --git a/apollo-federation/src/supergraph/subgraph.rs b/apollo-federation/src/supergraph/subgraph.rs new file mode 100644 index 0000000000..7697d3b569 --- /dev/null +++ b/apollo-federation/src/supergraph/subgraph.rs @@ -0,0 +1,107 @@ +use std::collections::BTreeMap; +use std::fmt; +use std::sync::Arc; + +use crate::error::FederationError; +use crate::error::SingleFederationError; +use crate::schema::FederationSchema; +use crate::schema::ValidFederationSchema; + +pub(super) struct FederationSubgraph { + pub(super) name: String, + pub(super) url: String, + pub(super) schema: FederationSchema, +} + +pub(super) struct FederationSubgraphs { + pub(super) subgraphs: BTreeMap, +} + +impl FederationSubgraphs { + pub(super) fn new() -> Self { + FederationSubgraphs { + subgraphs: BTreeMap::new(), + } + } + + pub(super) fn add(&mut self, subgraph: FederationSubgraph) -> Result<(), FederationError> { + if self.subgraphs.contains_key(&subgraph.name) { + return Err(SingleFederationError::InvalidFederationSupergraph { + message: format!("A subgraph named \"{}\" already exists", subgraph.name), + } + .into()); + } + self.subgraphs.insert(subgraph.name.clone(), subgraph); + Ok(()) + } + + fn get(&self, name: &str) -> Option<&FederationSubgraph> { + self.subgraphs.get(name) + } + + pub(super) fn get_mut(&mut self, name: &str) -> Option<&mut FederationSubgraph> { + self.subgraphs.get_mut(name) + } +} + +impl IntoIterator for FederationSubgraphs { + type Item = as IntoIterator>::Item; + type IntoIter = as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.subgraphs.into_iter() + } +} + +// TODO(@goto-bus-stop): consider an appropriate name for this in the public API +// TODO(@goto-bus-stop): should this exist separately from the `crate::subgraph::Subgraph` type? 
+#[derive(Debug, Clone)] +pub struct ValidFederationSubgraph { + pub name: String, + pub url: String, + pub schema: ValidFederationSchema, +} + +pub struct ValidFederationSubgraphs { + pub(super) subgraphs: BTreeMap, ValidFederationSubgraph>, +} + +impl fmt::Debug for ValidFederationSubgraphs { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("ValidFederationSubgraphs ")?; + f.debug_map().entries(self.subgraphs.iter()).finish() + } +} + +impl ValidFederationSubgraphs { + pub(crate) fn new() -> Self { + ValidFederationSubgraphs { + subgraphs: BTreeMap::new(), + } + } + + pub(crate) fn add(&mut self, subgraph: ValidFederationSubgraph) -> Result<(), FederationError> { + if self.subgraphs.contains_key(subgraph.name.as_str()) { + return Err(SingleFederationError::InvalidFederationSupergraph { + message: format!("A subgraph named \"{}\" already exists", subgraph.name), + } + .into()); + } + self.subgraphs + .insert(subgraph.name.as_str().into(), subgraph); + Ok(()) + } + + pub fn get(&self, name: &str) -> Option<&ValidFederationSubgraph> { + self.subgraphs.get(name) + } +} + +impl IntoIterator for ValidFederationSubgraphs { + type Item = , ValidFederationSubgraph> as IntoIterator>::Item; + type IntoIter = , ValidFederationSubgraph> as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.subgraphs.into_iter() + } +} From cdf9e6bddbfc31ad857ad91cef9df3fd2ea05262 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Wed, 21 Aug 2024 11:52:53 +0200 Subject: [PATCH 087/108] fix(subgraph_service): when the subgraph connection is closed or in error, return a proper subgraph response (#5859) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- ...bnjjj_fix_fatal_error_subgraph_response.md | 5 + .../src/services/subgraph_service.rs | 95 ++++++++++++++++++- 2 files changed, 96 insertions(+), 4 deletions(-) create mode 100644 .changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md diff --git 
a/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md b/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md new file mode 100644 index 0000000000..9e09db2866 --- /dev/null +++ b/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md @@ -0,0 +1,5 @@ +### fix(subgraph_service): when the subgraph connection is closed or in error, return a proper subgraph response ([PR #5859](https://github.com/apollographql/router/pull/5859)) + +When the subgraph connection is closed or in error, return a proper subgraph response containing an error. This was preventing subgraph response service to be triggered in coprocessor and rhai. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5859 \ No newline at end of file diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index c2274f6bc8..5d8fae1ede 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -16,6 +16,7 @@ use http::header::{self}; use http::response::Parts; use http::HeaderValue; use http::Request; +use http::StatusCode; use hyper_rustls::ConfigBuilderExt; use itertools::Itertools; use mediatype::names::APPLICATION; @@ -871,9 +872,34 @@ pub(crate) async fn process_batch( // Perform the actual fetch. If this fails then we didn't manage to make the call at all, so we can't do anything with it. 
tracing::debug!("fetching from subgraph: {service}"); let (parts, content_type, body) = - do_fetch(client, &batch_context, &service, request, display_body) + match do_fetch(client, &batch_context, &service, request, display_body) .instrument(subgraph_req_span) - .await?; + .await + { + Ok(res) => res, + Err(err) => { + let resp = http::Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(err.to_graphql_error(None)) + .map_err(|err| FetchError::SubrequestHttpError { + status_code: None, + service: service.clone(), + reason: format!("cannot create the http response from error: {err:?}"), + })?; + let (parts, body) = resp.into_parts(); + let body = + serde_json::to_vec(&body).map_err(|err| FetchError::SubrequestHttpError { + status_code: None, + service: service.clone(), + reason: format!("cannot serialize the error: {err:?}"), + })?; + ( + parts, + Ok(ContentType::ApplicationJson), + Some(Ok(body.into())), + ) + } + }; let subgraph_response_event = batch_context .extensions() @@ -1283,9 +1309,21 @@ pub(crate) async fn call_single_http( // Perform the actual fetch. If this fails then we didn't manage to make the call at all, so we can't do anything with it. 
let (parts, content_type, body) = - do_fetch(client, &context, service_name, request, display_body) + match do_fetch(client, &context, service_name, request, display_body) .instrument(subgraph_req_span) - .await?; + .await + { + Ok(resp) => resp, + Err(err) => { + return Ok(SubgraphResponse::builder() + .subgraph_name(service_name.to_string()) + .error(err.to_graphql_error(None)) + .status_code(StatusCode::INTERNAL_SERVER_ERROR) + .context(context) + .extensions(Object::default()) + .build()); + } + }; let subgraph_response_event = context .extensions() @@ -1705,6 +1743,17 @@ mod tests { server.await.unwrap(); } + // starts a local server emulating a subgraph returning connection closed + async fn emulate_subgraph_panic(listener: TcpListener) { + async fn handle(_request: http::Request) -> Result, Infallible> { + panic!("test") + } + + let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); + let server = Server::from_tcp(listener).unwrap().serve(make_svc); + server.await.unwrap(); + } + // starts a local server emulating a subgraph returning bad response format async fn emulate_subgraph_ok_status_invalid_response(listener: TcpListener) { async fn handle(_request: http::Request) -> Result, Infallible> { @@ -2421,6 +2470,44 @@ mod tests { assert!(response.response.body().errors.is_empty()); } + #[tokio::test(flavor = "multi_thread")] + async fn test_subgraph_service_panic() { + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let socket_addr = listener.local_addr().unwrap(); + tokio::task::spawn(emulate_subgraph_panic(listener)); + let subgraph_service = SubgraphService::new( + "test", + true, + None, + Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), + ) + .expect("can create a SubgraphService"); + + let url = Uri::from_str(&format!("http://{socket_addr}")).unwrap(); + let response = subgraph_service + .oneshot( + 
SubgraphRequest::builder() + .supergraph_request(supergraph_request("query")) + .subgraph_request(subgraph_http_request(url, "query")) + .operation_kind(OperationKind::Query) + .subgraph_name(String::from("test")) + .context(Context::new()) + .build(), + ) + .await + .unwrap(); + assert!(!response.response.body().errors.is_empty()); + assert_eq!( + response.response.body().errors[0].message, + "HTTP fetch failed from 'test': HTTP fetch failed from 'test': connection closed before message completed" + ); + } + #[tokio::test(flavor = "multi_thread")] async fn test_subgraph_service_invalid_response() { let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); From 092170a2460afe2b7934f18decb6a502179c2e9e Mon Sep 17 00:00:00 2001 From: Taylor Ninesling Date: Wed, 21 Aug 2024 07:29:11 -0700 Subject: [PATCH 088/108] Add sections on using @cost and @listSize to demand control docs (#5839) --- .../docs_tninesling_cost_docs_update.md | 5 ++ .../executing-operations/demand-control.mdx | 58 +++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 .changesets/docs_tninesling_cost_docs_update.md diff --git a/.changesets/docs_tninesling_cost_docs_update.md b/.changesets/docs_tninesling_cost_docs_update.md new file mode 100644 index 0000000000..7311bdfa38 --- /dev/null +++ b/.changesets/docs_tninesling_cost_docs_update.md @@ -0,0 +1,5 @@ +### Add sections on using @cost and @listSize to demand control docs ([PR #5839](https://github.com/apollographql/router/pull/5839)) + +Updates the demand control documentation to include details on `@cost` and `@listSize` for more accurate cost estimation. 
+ +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5839 diff --git a/docs/source/executing-operations/demand-control.mdx b/docs/source/executing-operations/demand-control.mdx index 8b441cb381..f2c5657a22 100644 --- a/docs/source/executing-operations/demand-control.mdx +++ b/docs/source/executing-operations/demand-control.mdx @@ -360,6 +360,21 @@ Looking at the top N operations, you may see that the estimated costs have been All operations except `ExtractAll` are in a range of acceptable costs. + + +#### `@listSize` + + + +If some of your fields have list sizes that significantly differ from `static_estimated.list_size`, you can provide the router with more information. + +The `@listSize` directive can be configured in multiple ways: + +1. Use the `assumedSize` argument to define a static size for a field. +2. Use `slicingArguments` to indicate that a field's size is dynamically controlled by one or more of its arguments. This works well if some of the arguments are paging parameters. + +Learn more about the `@listSize` directive [here](/federation/federated-schemas/federated-directives/#listsize). + ### Enforce cost limits After determining the cost estimation model of your operations, you should update and enforce the new cost limits. @@ -440,6 +455,49 @@ Assuming each review having exactly one author, the total cost of the query is 2 + + +#### `@cost` + + + +You can further customize the cost calculation with the `@cost` directive. This directive takes a `weight` argument which replaces the default weights outlined above. + +Revisiting the products query above, if the `topProducts.name` field is annotated with `@cost(weight: 5)`, then the total cost of the query increases to 56. + + + +```graphql +type Query { + topProducts: [Product] +} + +type Product { + name: String! @cost(weight: 5) + reviews: [Review] +} + +type Review { + author: Author! +} + +type Author { + name: String! 
+} +``` + + + + + +```text disableCopy=true showLineNumbers=false +1 Query (0 cost) + 6 product objects (6) + 6 name scalars (30) + 10 review objects (10) + 10 author objects (10) + 10 name scalars (0) = 56 total cost +``` + + + +Learn more about the `@cost` directive [here](/federation/federated-schemas/federated-directives/#cost). + ### Estimated and actual costs For an operation with list fields, the router must run the operation to get the actual number of items in its lists. Without actual list sizes, the cost of an operation can only be estimated before it's executed, where you assume the size of lists. From a0a98d4738b54625d4e135765012e0c309a61623 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Thu, 22 Aug 2024 10:10:04 +0200 Subject: [PATCH 089/108] fix(telemetry): improve support of conditions at the request level, especially for events (#5759) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .../feat_candle_exhale_deodorant_weeds.md | 26 +++ .changesets/fix_bnjjj_fix_5702.md | 21 +++ apollo-router/src/plugins/telemetry/config.rs | 8 + .../telemetry/config_new/conditional.rs | 16 ++ .../telemetry/config_new/conditions.rs | 159 ++++++++++++++--- .../plugins/telemetry/config_new/events.rs | 113 ++++++++++++ .../telemetry/config_new/extendable.rs | 18 ++ .../telemetry/config_new/graphql/selectors.rs | 5 + .../telemetry/config_new/instruments.rs | 30 ++++ .../src/plugins/telemetry/config_new/mod.rs | 40 ++++- .../plugins/telemetry/config_new/selectors.rs | 166 ++++++++++++++++++ ...aph_events_with_exists_condition@logs.snap | 22 +++ .../src/plugins/telemetry/config_new/spans.rs | 20 +++ apollo-router/src/plugins/telemetry/mod.rs | 3 + ...custom_events_exists_condition.router.yaml | 13 ++ 15 files changed, 638 insertions(+), 22 deletions(-) create mode 100644 .changesets/feat_candle_exhale_deodorant_weeds.md create mode 100644 .changesets/fix_bnjjj_fix_5702.md create mode 100644 
apollo-router/src/plugins/telemetry/config_new/snapshots/apollo_router__plugins__telemetry__config_new__events__tests__supergraph_events_with_exists_condition@logs.snap create mode 100644 apollo-router/src/plugins/telemetry/testdata/custom_events_exists_condition.router.yaml diff --git a/.changesets/feat_candle_exhale_deodorant_weeds.md b/.changesets/feat_candle_exhale_deodorant_weeds.md new file mode 100644 index 0000000000..ed3f311569 --- /dev/null +++ b/.changesets/feat_candle_exhale_deodorant_weeds.md @@ -0,0 +1,26 @@ +### Add warnings for invalid configuration on custom telemetry ([PR #5759](https://github.com/apollographql/router/issues/5759)) + +For example sometimes if you have configuration like this: + +```yaml +telemetry: + instrumentation: + events: + subgraph: + my.event: + message: "Auditing Router Event" + level: info + on: request + attributes: + subgraph.response.status: + subgraph_response_status: code # This is a first warning because you can't access to the response if you're at the request stage + condition: + eq: + - subgraph_name # Another warning because instead of writing subgraph_name: true which is the selector, you're asking for a comparison between 2 strings ("subgraph_name" and "product") + - product +``` + +This configuration is syntaxically correct but wouldn't probably do what you would like to. I put comments to highlight 2 mistakes in this example. +Before it was silently computed, now you'll get warning when starting the router. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759 \ No newline at end of file diff --git a/.changesets/fix_bnjjj_fix_5702.md b/.changesets/fix_bnjjj_fix_5702.md new file mode 100644 index 0000000000..14e662e6bb --- /dev/null +++ b/.changesets/fix_bnjjj_fix_5702.md @@ -0,0 +1,21 @@ +### Improve support of conditions at the request level, especially for events ([Issue #5702](https://github.com/apollographql/router/issues/5702)) + +`exists` condition is now properly handled with events, this configuration will now work: + +```yaml +telemetry: + instrumentation: + events: + supergraph: + my.event: + message: "Auditing Router Event" + level: info + on: request + attributes: + graphql.operation.name: true + condition: + exists: + operation_name: string +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index f360774b9b..dba6f207f4 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -94,6 +94,14 @@ pub(crate) struct Instrumentation { pub(crate) instruments: config_new::instruments::InstrumentsConfig, } +impl Instrumentation { + pub(crate) fn validate(&self) -> Result<(), String> { + self.events.validate()?; + self.instruments.validate()?; + self.spans.validate() + } +} + /// Metrics configuration #[derive(Clone, Default, Debug, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, default)] diff --git a/apollo-router/src/plugins/telemetry/config_new/conditional.rs b/apollo-router/src/plugins/telemetry/config_new/conditional.rs index 94136ee63d..a42f112a8c 100644 --- a/apollo-router/src/plugins/telemetry/config_new/conditional.rs +++ b/apollo-router/src/plugins/telemetry/config_new/conditional.rs @@ -149,6 +149,18 @@ where } } +impl Conditional +where + Att: Selector, +{ + pub(crate) fn 
validate(&self) -> Result<(), String> { + match &self.condition { + Some(cond) => cond.lock().validate(None), + None => Ok(()), + } + } +} + impl Selector for Conditional where Att: Selector, @@ -334,6 +346,10 @@ where _ => None, } } + + fn is_active(&self, stage: super::Stage) -> bool { + self.selector.is_active(stage) + } } /// Custom Deserializer for attributes that will deserialize into a custom field if possible, but otherwise into one of the pre-defined attributes. diff --git a/apollo-router/src/plugins/telemetry/config_new/conditions.rs b/apollo-router/src/plugins/telemetry/config_new/conditions.rs index d3c44610a0..915fad6135 100644 --- a/apollo-router/src/plugins/telemetry/config_new/conditions.rs +++ b/apollo-router/src/plugins/telemetry/config_new/conditions.rs @@ -3,6 +3,7 @@ use schemars::JsonSchema; use serde::Deserialize; use tower::BoxError; +use super::Stage; use crate::plugins::telemetry::config::AttributeValue; use crate::plugins::telemetry::config_new::Selector; use crate::Context; @@ -55,28 +56,98 @@ impl Condition where T: Selector, { - pub(crate) fn evaluate_request(&mut self, request: &T::Request) -> Option { + /// restricted_stage is Some if this condiiton will only applies at a specific stage like for events for example + pub(crate) fn validate(&self, restricted_stage: Option) -> Result<(), String> { match self { - Condition::Eq(eq) => match (eq[0].on_request(request), eq[1].on_request(request)) { - (None, None) => None, - (None, Some(right)) => { - eq[1] = SelectorOrValue::Value(right.into()); - None + Condition::Eq(arr) | Condition::Gt(arr) | Condition::Lt(arr) => match (&arr[0], &arr[1]) { + (SelectorOrValue::Value(val1), SelectorOrValue::Value(val2)) => { + Err(format!("trying to compare 2 values ('{val1}' and '{val2}'), usually it's a syntax error because you want to use a specific selector and a value in a condition")) } - (Some(left), None) => { - eq[0] = SelectorOrValue::Value(left.into()); - None - } - (Some(left), Some(right)) => 
{ - if left == right { - *self = Condition::True; - Some(true) - } else { - Some(false) + (SelectorOrValue::Value(_), SelectorOrValue::Selector(sel)) | (SelectorOrValue::Selector(sel), SelectorOrValue::Value(_)) => { + // Special condition for events + if let Some(Stage::Request) = &restricted_stage { + if !sel.is_active(Stage::Request) { + return Err(format!("selector {sel:?} is only valid for request stage, this log event will never trigger")); + } + } + Ok(()) + }, + (SelectorOrValue::Selector(sel1), SelectorOrValue::Selector(sel2)) => { + // Special condition for events + if let Some(Stage::Request) = &restricted_stage { + if !sel1.is_active(Stage::Request) { + return Err(format!("selector {sel1:?} is only valid for request stage, this log event will never trigger")); + } + if !sel2.is_active(Stage::Request) { + return Err(format!("selector {sel2:?} is only valid for request stage, this log event will never trigger")); + } } + Ok(()) + }, + }, + Condition::Exists(sel) => { + match restricted_stage { + Some(stage) => { + if sel.is_active(stage) { + Ok(()) + } else { + Err(format!("the 'exists' condition use a selector applied at the wrong stage, this condition will be executed at the {} stage", stage)) + } + }, + None => Ok(()) } }, + Condition::All(all) => { + for cond in all { + cond.validate(restricted_stage)?; + } + + Ok(()) + }, + Condition::Any(any) => { + for cond in any { + cond.validate(restricted_stage)?; + } + + Ok(()) + }, + Condition::Not(cond) => cond.validate(restricted_stage), + Condition::True | Condition::False => Ok(()), + } + } + + pub(crate) fn evaluate_request(&mut self, request: &T::Request) -> Option { + match self { + Condition::Eq(eq) => { + if !eq[0].is_active(Stage::Request) && !eq[1].is_active(Stage::Request) { + // Nothing to compute here + return None; + } + match (eq[0].on_request(request), eq[1].on_request(request)) { + (None, None) => None, + (None, Some(right)) => { + eq[1] = SelectorOrValue::Value(right.into()); + None + } + 
(Some(left), None) => { + eq[0] = SelectorOrValue::Value(left.into()); + None + } + (Some(left), Some(right)) => { + if left == right { + *self = Condition::True; + Some(true) + } else { + Some(false) + } + } + } + } Condition::Gt(gt) => { + if !gt[0].is_active(Stage::Request) && !gt[1].is_active(Stage::Request) { + // Nothing to compute here + return None; + } let left_att = gt[0].on_request(request).map(AttributeValue::from); let right_att = gt[1].on_request(request).map(AttributeValue::from); match (left_att, right_att) { @@ -112,6 +183,10 @@ where } } Condition::Lt(lt) => { + if !lt[0].is_active(Stage::Request) && !lt[1].is_active(Stage::Request) { + // Nothing to compute here + return None; + } let left_att = lt[0].on_request(request).map(AttributeValue::from); let right_att = lt[1].on_request(request).map(AttributeValue::from); match (left_att, right_att) { @@ -147,9 +222,13 @@ where } } Condition::Exists(exist) => { - if exist.on_request(request).is_some() { - *self = Condition::True; - Some(true) + if exist.is_active(Stage::Request) { + if exist.on_request(request).is_some() { + *self = Condition::True; + Some(true) + } else { + Some(false) + } } else { None } @@ -331,6 +410,7 @@ where Condition::False => false, } } + pub(crate) fn evaluate_drop(&self) -> Option { match self { Condition::Eq(eq) => match (eq[0].on_drop(), eq[1].on_drop()) { @@ -478,6 +558,13 @@ where SelectorOrValue::Selector(selector) => selector.on_drop(), } } + + fn is_active(&self, stage: super::Stage) -> bool { + match self { + SelectorOrValue::Value(_) => true, + SelectorOrValue::Selector(selector) => selector.is_active(stage), + } + } } #[cfg(test)] @@ -494,8 +581,10 @@ mod test { use crate::plugins::telemetry::config_new::test::field; use crate::plugins::telemetry::config_new::test::ty; use crate::plugins::telemetry::config_new::Selector; + use crate::plugins::telemetry::config_new::Stage; use crate::Context; + #[derive(Debug)] enum TestSelector { Req, Resp, @@ -567,11 +656,22 @@ mod 
test { _ => None, } } + + fn is_active(&self, stage: crate::plugins::telemetry::config_new::Stage) -> bool { + match self { + Req => matches!(stage, Stage::Request), + Resp => matches!( + stage, + Stage::Response | Stage::ResponseEvent | Stage::ResponseField + ), + Static(_) => true, + } + } } #[test] fn test_condition_exist() { - assert_eq!(exists(Req).req(None), None); + assert_eq!(exists(Req).req(None), Some(false)); assert_eq!(exists(Req).req(Some(1i64)), Some(true)); assert!(!exists(Resp).resp(None)); assert!(exists(Resp).resp(Some(1i64))); @@ -747,7 +847,8 @@ mod test { assert_eq!(lt(Req, Req).req(None), None); assert_eq!(exists(Req).req(Some(1i64)), Some(true)); - assert_eq!(exists(Req).req(None), None); + assert_eq!(exists(Req).req(None), Some(false)); + assert!(!exists(Resp).resp(None)); assert_eq!(all(eq(1, 1), eq(1, Req)).req(Some(1i64)), Some(true)); assert_eq!(all(eq(1, 1), eq(1, Req)).req(None), None); @@ -760,6 +861,22 @@ mod test { assert!(eq(Resp, "error").error(Some("error"))); } + #[test] + fn test_condition_validate() { + assert!(eq(Req, 1).validate(Some(Stage::Request)).is_ok()); + assert!(eq(Req, 1).validate(Some(Stage::Response)).is_ok()); + assert!(eq(1, Req).validate(Some(Stage::Request)).is_ok()); + assert!(eq(1, Req).validate(Some(Stage::Response)).is_ok()); + assert!(eq(Resp, 1).validate(Some(Stage::Request)).is_err()); + assert!(eq(Resp, 1).validate(None).is_ok()); + assert!(eq(1, Resp).validate(None).is_ok()); + assert!(eq(1, Resp).validate(Some(Stage::Request)).is_err()); + assert!(exists(Resp).validate(Some(Stage::Request)).is_err()); + assert!(exists(Req).validate(None).is_ok()); + assert!(exists(Req).validate(Some(Stage::Request)).is_ok()); + assert!(exists(Resp).validate(None).is_ok()); + } + #[test] fn test_evaluate_drop() { assert!(eq(Req, 1).evaluate_drop().is_none()); diff --git a/apollo-router/src/plugins/telemetry/config_new/events.rs b/apollo-router/src/plugins/telemetry/config_new/events.rs index bf471dc019..a91067a0e2 
100644 --- a/apollo-router/src/plugins/telemetry/config_new/events.rs +++ b/apollo-router/src/plugins/telemetry/config_new/events.rs @@ -14,6 +14,7 @@ use tracing::Span; use super::instruments::Instrumented; use super::Selector; use super::Selectors; +use super::Stage; use crate::plugins::telemetry::config_new::attributes::RouterAttributes; use crate::plugins::telemetry::config_new::attributes::SubgraphAttributes; use crate::plugins::telemetry::config_new::attributes::SupergraphAttributes; @@ -127,6 +128,54 @@ impl Events { custom: custom_events, } } + + pub(crate) fn validate(&self) -> Result<(), String> { + if let StandardEventConfig::Conditional { condition, .. } = &self.router.attributes.request + { + condition.validate(Some(Stage::Request))?; + } + if let StandardEventConfig::Conditional { condition, .. } = &self.router.attributes.response + { + condition.validate(Some(Stage::Response))?; + } + if let StandardEventConfig::Conditional { condition, .. } = + &self.supergraph.attributes.request + { + condition.validate(Some(Stage::Request))?; + } + if let StandardEventConfig::Conditional { condition, .. } = + &self.supergraph.attributes.response + { + condition.validate(Some(Stage::Response))?; + } + if let StandardEventConfig::Conditional { condition, .. } = + &self.subgraph.attributes.request + { + condition.validate(Some(Stage::Request))?; + } + if let StandardEventConfig::Conditional { condition, .. 
} = + &self.subgraph.attributes.response + { + condition.validate(Some(Stage::Response))?; + } + for (name, custom_event) in &self.router.custom { + custom_event.validate().map_err(|err| { + format!("configuration error for router custom event {name:?}: {err}") + })?; + } + for (name, custom_event) in &self.supergraph.custom { + custom_event.validate().map_err(|err| { + format!("configuration error for supergraph custom event {name:?}: {err}") + })?; + } + for (name, custom_event) in &self.subgraph.custom { + custom_event.validate().map_err(|err| { + format!("configuration error for subgraph custom event {name:?}: {err}") + })?; + } + + Ok(()) + } } pub(crate) type RouterEvents = @@ -576,6 +625,21 @@ where condition: Condition, } +impl Event +where + A: Selectors + + Default + + Debug, + E: Selector + Debug, +{ + pub(crate) fn validate(&self) -> Result<(), String> { + let stage = Some(self.on.into()); + self.attributes.validate(stage)?; + self.condition.validate(stage)?; + Ok(()) + } +} + /// When to trigger the event. 
#[derive(Deserialize, JsonSchema, Clone, Debug, Copy, PartialEq)] #[serde(rename_all = "snake_case")] @@ -736,6 +800,7 @@ mod tests { use super::*; use crate::assert_snapshot_subscriber; use crate::context::CONTAINS_GRAPHQL_ERROR; + use crate::context::OPERATION_NAME; use crate::graphql; use crate::plugins::telemetry::Telemetry; use crate::plugins::test::PluginTestHarness; @@ -877,6 +942,54 @@ mod tests { .await } + #[tokio::test(flavor = "multi_thread")] + async fn test_supergraph_events_with_exists_condition() { + let test_harness: PluginTestHarness = PluginTestHarness::builder() + .config(include_str!( + "../testdata/custom_events_exists_condition.router.yaml" + )) + .build() + .await; + + async { + let ctx = Context::new(); + ctx.insert(OPERATION_NAME, String::from("Test")).unwrap(); + test_harness + .call_supergraph( + supergraph::Request::fake_builder() + .query("query Test { foo }") + .context(ctx) + .build() + .unwrap(), + |_r| { + supergraph::Response::fake_builder() + .data(serde_json::json!({"data": "res"}).to_string()) + .build() + .expect("expecting valid response") + }, + ) + .await + .expect("expecting successful response"); + test_harness + .call_supergraph( + supergraph::Request::fake_builder() + .query("query { foo }") + .build() + .unwrap(), + |_r| { + supergraph::Response::fake_builder() + .data(serde_json::json!({"data": "res"}).to_string()) + .build() + .expect("expecting valid response") + }, + ) + .await + .expect("expecting successful response"); + } + .with_subscriber(assert_snapshot_subscriber!()) + .await + } + #[tokio::test(flavor = "multi_thread")] async fn test_supergraph_events_on_graphql_error() { let test_harness: PluginTestHarness = PluginTestHarness::builder() diff --git a/apollo-router/src/plugins/telemetry/config_new/extendable.rs b/apollo-router/src/plugins/telemetry/config_new/extendable.rs index f3c1a4d332..6af5d2bf1c 100644 --- a/apollo-router/src/plugins/telemetry/config_new/extendable.rs +++ 
b/apollo-router/src/plugins/telemetry/config_new/extendable.rs @@ -17,6 +17,7 @@ use serde_json::Map; use serde_json::Value; use tower::BoxError; +use super::Stage; use crate::plugins::telemetry::config_new::attributes::DefaultAttributeRequirementLevel; use crate::plugins::telemetry::config_new::DefaultForLevel; use crate::plugins::telemetry::config_new::Selector; @@ -255,6 +256,23 @@ where } } +impl Extendable +where + A: Default + Selectors, + E: Selector, +{ + pub(crate) fn validate(&self, restricted_stage: Option<Stage>) -> Result<(), String> { + if let Some(Stage::Request) = &restricted_stage { + for (name, custom) in &self.custom { + if !custom.is_active(Stage::Request) { + return Err(format!("cannot set the attribute {name:?} because it is using a selector computed in another stage than 'request' so it will not be computed")); + } + } + } + + Ok(()) + } +} #[cfg(test)] mod test { use std::sync::Arc; diff --git a/apollo-router/src/plugins/telemetry/config_new/graphql/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/graphql/selectors.rs index 20a648d465..853681087f 100644 --- a/apollo-router/src/plugins/telemetry/config_new/graphql/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/graphql/selectors.rs @@ -13,6 +13,7 @@ use crate::plugins::telemetry::config_new::instruments::InstrumentValue; use crate::plugins::telemetry::config_new::instruments::StandardUnit; use crate::plugins::telemetry::config_new::selectors::OperationName; use crate::plugins::telemetry::config_new::Selector; +use crate::plugins::telemetry::config_new::Stage; use crate::Context; #[derive(Deserialize, JsonSchema, Clone, Debug)] @@ -173,6 +174,10 @@ impl Selector for GraphQLSelector { } } } + + fn is_active(&self, stage: Stage) -> bool { + matches!(stage, Stage::ResponseField) + } } fn name_to_otel_string(name: &apollo_compiler::Name) -> opentelemetry::StringValue { diff --git a/apollo-router/src/plugins/telemetry/config_new/instruments.rs 
b/apollo-router/src/plugins/telemetry/config_new/instruments.rs index 3b112a6f2d..341f84ad35 100644 --- a/apollo-router/src/plugins/telemetry/config_new/instruments.rs +++ b/apollo-router/src/plugins/telemetry/config_new/instruments.rs @@ -103,6 +103,36 @@ const HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC: &str = "http.client.request.body.siz const HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC: &str = "http.client.response.body.size"; impl InstrumentsConfig { + pub(crate) fn validate(&self) -> Result<(), String> { + for (name, custom) in &self.router.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom router instrument {name:?} in condition: {err}") + })?; + } + for (name, custom) in &self.supergraph.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom supergraph instrument {name:?} in condition: {err}") + })?; + } + for (name, custom) in &self.subgraph.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom subgraph instrument {name:?} in condition: {err}") + })?; + } + for (name, custom) in &self.graphql.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom graphql instrument {name:?} in condition: {err}") + })?; + } + for (name, custom) in &self.cache.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom cache instrument {name:?} in condition: {err}") + })?; + } + + Ok(()) + } + /// Update the defaults for spans configuration regarding the `default_attribute_requirement_level` pub(crate) fn update_defaults(&mut self) { self.router diff --git a/apollo-router/src/plugins/telemetry/config_new/mod.rs b/apollo-router/src/plugins/telemetry/config_new/mod.rs index 2a3f46edcf..082d0a438e 100644 --- a/apollo-router/src/plugins/telemetry/config_new/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/mod.rs @@ -1,3 +1,4 @@ +use events::EventOn; use opentelemetry::baggage::BaggageExt; use 
opentelemetry::trace::TraceContextExt; use opentelemetry::trace::TraceId; @@ -51,7 +52,42 @@ pub(crate) trait Selectors { } } -pub(crate) trait Selector { +#[allow(dead_code)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub(crate) enum Stage { + Request, + Response, + ResponseEvent, + ResponseField, + Error, + Drop, +} + +impl std::fmt::Display for Stage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Stage::Request => write!(f, "request"), + Stage::Response => write!(f, "response"), + Stage::ResponseEvent => write!(f, "response_event"), + Stage::ResponseField => write!(f, "response_field"), + Stage::Error => write!(f, "error"), + Stage::Drop => write!(f, "drop"), + } + } +} + +impl From<EventOn> for Stage { + fn from(value: EventOn) -> Self { + match value { + EventOn::Request => Self::Request, + EventOn::Response => Self::Response, + EventOn::EventResponse => Self::ResponseEvent, + EventOn::Error => Self::Error, + } + } +} + +pub(crate) trait Selector: std::fmt::Debug { type Request; type Response; type EventResponse; @@ -79,6 +115,8 @@ pub(crate) trait Selector { fn on_drop(&self) -> Option<opentelemetry::Value> { None } + + fn is_active(&self, stage: Stage) -> bool; } pub(crate) trait DefaultForLevel { diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs index 2f8aa1faf6..9047764a80 100644 --- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs @@ -805,6 +805,55 @@ impl Selector for RouterSelector { _ => None, } } + + fn is_active(&self, stage: super::Stage) -> bool { + match stage { + super::Stage::Request => { + matches!( + self, + RouterSelector::RequestHeader { .. } + | RouterSelector::RequestMethod { .. } + | RouterSelector::TraceId { .. } + | RouterSelector::StudioOperationId { .. } + | RouterSelector::Baggage { .. } + | RouterSelector::Static(_) + | RouterSelector::Env { .. 
} + | RouterSelector::StaticField { .. } + ) + } + super::Stage::Response | super::Stage::ResponseEvent => matches!( + self, + RouterSelector::TraceId { .. } + | RouterSelector::StudioOperationId { .. } + | RouterSelector::OperationName { .. } + | RouterSelector::Baggage { .. } + | RouterSelector::Static(_) + | RouterSelector::Env { .. } + | RouterSelector::StaticField { .. } + | RouterSelector::ResponseHeader { .. } + | RouterSelector::ResponseContext { .. } + | RouterSelector::ResponseStatus { .. } + | RouterSelector::OnGraphQLError { .. } + ), + super::Stage::ResponseField => false, + super::Stage::Error => matches!( + self, + RouterSelector::TraceId { .. } + | RouterSelector::StudioOperationId { .. } + | RouterSelector::OperationName { .. } + | RouterSelector::Baggage { .. } + | RouterSelector::Static(_) + | RouterSelector::Env { .. } + | RouterSelector::StaticField { .. } + | RouterSelector::ResponseContext { .. } + | RouterSelector::Error { .. } + ), + super::Stage::Drop => matches!( + self, + RouterSelector::Static(_) | RouterSelector::StaticField { .. } + ), + } + } } impl Selector for SupergraphSelector { @@ -1173,6 +1222,66 @@ impl Selector for SupergraphSelector { _ => None, } } + + fn is_active(&self, stage: super::Stage) -> bool { + match stage { + super::Stage::Request => matches!( + self, + SupergraphSelector::OperationName { .. } + | SupergraphSelector::OperationKind { .. } + | SupergraphSelector::Query { .. } + | SupergraphSelector::RequestHeader { .. } + | SupergraphSelector::QueryVariable { .. } + | SupergraphSelector::RequestContext { .. } + | SupergraphSelector::Baggage { .. } + | SupergraphSelector::Env { .. } + | SupergraphSelector::Static(_) + | SupergraphSelector::StaticField { .. } + ), + super::Stage::Response => matches!( + self, + SupergraphSelector::Query { .. } + | SupergraphSelector::ResponseHeader { .. } + | SupergraphSelector::ResponseStatus { .. } + | SupergraphSelector::ResponseContext { .. 
} + | SupergraphSelector::OnGraphQLError { .. } + | SupergraphSelector::OperationName { .. } + | SupergraphSelector::OperationKind { .. } + | SupergraphSelector::IsPrimaryResponse { .. } + | SupergraphSelector::Static(_) + | SupergraphSelector::StaticField { .. } + ), + super::Stage::ResponseEvent => matches!( + self, + SupergraphSelector::ResponseData { .. } + | SupergraphSelector::ResponseErrors { .. } + | SupergraphSelector::Cost { .. } + | SupergraphSelector::OnGraphQLError { .. } + | SupergraphSelector::OperationName { .. } + | SupergraphSelector::OperationKind { .. } + | SupergraphSelector::IsPrimaryResponse { .. } + | SupergraphSelector::ResponseContext { .. } + | SupergraphSelector::Static(_) + | SupergraphSelector::StaticField { .. } + ), + super::Stage::ResponseField => false, + super::Stage::Error => matches!( + self, + SupergraphSelector::OperationName { .. } + | SupergraphSelector::OperationKind { .. } + | SupergraphSelector::Query { .. } + | SupergraphSelector::Error { .. } + | SupergraphSelector::Static(_) + | SupergraphSelector::StaticField { .. } + | SupergraphSelector::ResponseContext { .. } + | SupergraphSelector::IsPrimaryResponse { .. } + ), + super::Stage::Drop => matches!( + self, + SupergraphSelector::Static(_) | SupergraphSelector::StaticField { .. } + ), + } + } } impl Selector for SubgraphSelector { @@ -1548,6 +1657,63 @@ impl Selector for SubgraphSelector { _ => None, } } + + fn is_active(&self, stage: super::Stage) -> bool { + match stage { + super::Stage::Request => matches!( + self, + SubgraphSelector::SubgraphOperationName { .. } + | SubgraphSelector::SupergraphOperationName { .. } + | SubgraphSelector::SubgraphName { .. } + | SubgraphSelector::SubgraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationKind { .. } + | SubgraphSelector::SupergraphQuery { .. } + | SubgraphSelector::SubgraphQuery { .. } + | SubgraphSelector::SubgraphQueryVariable { .. } + | SubgraphSelector::SupergraphQueryVariable { .. 
} + | SubgraphSelector::SubgraphRequestHeader { .. } + | SubgraphSelector::SupergraphRequestHeader { .. } + | SubgraphSelector::RequestContext { .. } + | SubgraphSelector::Baggage { .. } + | SubgraphSelector::Env { .. } + | SubgraphSelector::Static(_) + | SubgraphSelector::StaticField { .. } + ), + super::Stage::Response => matches!( + self, + SubgraphSelector::SubgraphResponseHeader { .. } + | SubgraphSelector::SubgraphResponseStatus { .. } + | SubgraphSelector::SubgraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationName { .. } + | SubgraphSelector::SubgraphName { .. } + | SubgraphSelector::SubgraphResponseBody { .. } + | SubgraphSelector::SubgraphResponseData { .. } + | SubgraphSelector::SubgraphResponseErrors { .. } + | SubgraphSelector::ResponseContext { .. } + | SubgraphSelector::OnGraphQLError { .. } + | SubgraphSelector::Static(_) + | SubgraphSelector::StaticField { .. } + | SubgraphSelector::Cache { .. } + ), + super::Stage::ResponseEvent => false, + super::Stage::ResponseField => false, + super::Stage::Error => matches!( + self, + SubgraphSelector::SubgraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationName { .. } + | SubgraphSelector::Error { .. } + | SubgraphSelector::Static(_) + | SubgraphSelector::StaticField { .. } + | SubgraphSelector::ResponseContext { .. } + ), + super::Stage::Drop => matches!( + self, + SubgraphSelector::Static(_) | SubgraphSelector::StaticField { .. 
} + ), + } + } } #[cfg(test)] diff --git a/apollo-router/src/plugins/telemetry/config_new/snapshots/apollo_router__plugins__telemetry__config_new__events__tests__supergraph_events_with_exists_condition@logs.snap b/apollo-router/src/plugins/telemetry/config_new/snapshots/apollo_router__plugins__telemetry__config_new__events__tests__supergraph_events_with_exists_condition@logs.snap new file mode 100644 index 0000000000..0c9630144c --- /dev/null +++ b/apollo-router/src/plugins/telemetry/config_new/snapshots/apollo_router__plugins__telemetry__config_new__events__tests__supergraph_events_with_exists_condition@logs.snap @@ -0,0 +1,22 @@ +--- +source: apollo-router/src/plugins/telemetry/config_new/events.rs +expression: yaml +--- +- fields: + kind: my.event + level: INFO + message: Auditing Router Event + span: + apollo_private.field_level_instrumentation_ratio: 0.01 + apollo_private.graphql.variables: "{}" + graphql.document: "query Test { foo }" + graphql.operation.name: Test + name: supergraph + otel.kind: INTERNAL + spans: + - apollo_private.field_level_instrumentation_ratio: 0.01 + apollo_private.graphql.variables: "{}" + graphql.document: "query Test { foo }" + graphql.operation.name: Test + name: supergraph + otel.kind: INTERNAL diff --git a/apollo-router/src/plugins/telemetry/config_new/spans.rs b/apollo-router/src/plugins/telemetry/config_new/spans.rs index ff4a3b00a0..61dfb0f35c 100644 --- a/apollo-router/src/plugins/telemetry/config_new/spans.rs +++ b/apollo-router/src/plugins/telemetry/config_new/spans.rs @@ -53,6 +53,26 @@ impl Spans { TelemetryDataKind::Traces, ); } + + pub(crate) fn validate(&self) -> Result<(), String> { + for (name, custom) in &self.router.attributes.custom { + custom + .validate() + .map_err(|err| format!("error for router span attribute {name:?}: {err}"))?; + } + for (name, custom) in &self.supergraph.attributes.custom { + custom + .validate() + .map_err(|err| format!("error for supergraph span attribute {name:?}: {err}"))?; + } + for 
(name, custom) in &self.subgraph.attributes.custom { + custom + .validate() + .map_err(|err| format!("error for subgraph span attribute {name:?}: {err}"))?; + } + + Ok(()) + } } #[derive(Deserialize, JsonSchema, Clone, Debug, Default)] diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 8b2aefe4a9..345d1936ee 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -290,6 +290,9 @@ impl Plugin for Telemetry { config.instrumentation.spans.update_defaults(); config.instrumentation.instruments.update_defaults(); config.exporters.logging.validate()?; + if let Err(err) = config.instrumentation.validate() { + ::tracing::warn!("Potential configuration error for 'instrumentation': {err}, please check the documentation on https://www.apollographql.com/docs/router/configuration/telemetry/instrumentation/events"); + } let field_level_instrumentation_ratio = config.calculate_field_level_instrumentation_ratio()?; diff --git a/apollo-router/src/plugins/telemetry/testdata/custom_events_exists_condition.router.yaml b/apollo-router/src/plugins/telemetry/testdata/custom_events_exists_condition.router.yaml new file mode 100644 index 0000000000..0ee5b021f7 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/custom_events_exists_condition.router.yaml @@ -0,0 +1,13 @@ +telemetry: + instrumentation: + events: + supergraph: + my.event: + message: "Auditing Router Event" + level: info + on: request + attributes: + graphql.operation.name: true + condition: + exists: + operation_name: string \ No newline at end of file From 438b5c7f2c32fc7c512e34d9a81530ac374e29bf Mon Sep 17 00:00:00 2001 From: Iryna Shestak Date: Thu, 22 Aug 2024 17:05:45 +0200 Subject: [PATCH 090/108] feat: enable both_best_effort query planners (#5860) Formally switches #[default] to both_best_effort for experimental_query_planner_mode and adds a changelog entry. 
As part of this commit, we changed cache keys in redis integration tests. Redis cache key uses the planner_mode kind as part of its cache key. Since we are enabling `both_best_effort`, the cache key will change, and we have to update its keys. --- ..._enabling_both_best_effort_query_planners.md | 17 +++++++++++++++++ apollo-router/src/configuration/mod.rs | 2 +- apollo-router/tests/integration/redis.rs | 14 +++++++------- 3 files changed, 25 insertions(+), 8 deletions(-) create mode 100644 .changesets/feat_enabling_both_best_effort_query_planners.md diff --git a/.changesets/feat_enabling_both_best_effort_query_planners.md b/.changesets/feat_enabling_both_best_effort_query_planners.md new file mode 100644 index 0000000000..75b236dc98 --- /dev/null +++ b/.changesets/feat_enabling_both_best_effort_query_planners.md @@ -0,0 +1,17 @@ +### Enable native (rust) query planner to run in the background ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), [PR #5860](https://github.com/apollographql/router/pull/5860)) + +The router now schedules background jobs to run the native query planner in +order to compare its results to the legacy implementation. This is one of the +ways to help us ascertain its correctness before making a decision to switch +entirely to the native planner. + +The legacy query planner implementation continues to be used to plan and execute +operations, so there is no effect on the hot path. 
+ +You can disable running background comparisons in the native query planner by +enabling just the `legacy` mode in router.yaml: +```yaml +experimental_query_planner_mode: legacy +``` + +By [SimonSapin](https://github.com/SimonSapin) in ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771) [PR #5860](https://github.com/apollographql/router/pull/5860)) \ No newline at end of file diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 2559b69458..bbadd41430 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -226,7 +226,6 @@ pub(crate) enum QueryPlannerMode { /// (such as using legacy Apollo Federation 1) New, /// Use the old JavaScript-based implementation. - #[default] Legacy, /// Use primarily the Javascript-based implementation, /// but also schedule background jobs to run the Rust implementation and compare results, @@ -243,6 +242,7 @@ pub(crate) enum QueryPlannerMode { /// Falls back to `legacy` with a warning /// if the the new planner does not support the schema /// (such as using legacy Apollo Federation 1) + #[default] BothBestEffort, } diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index 6b0ff6b404..6fb3b5d381 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -26,7 +26,7 @@ async fn query_planner_cache() -> Result<(), BoxError> { // 2. run `docker compose up -d` and connect to the redis container by running `docker-compose exec redis /bin/bash`. // 3. Run the `redis-cli` command from the shell and start the redis `monitor` command. // 4. Run this test and yank the updated cache key from the redis logs. 
- let known_cache_key = "plan:0:v2.8.3:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3106dfc3339d8c3f3020434024bff0f566a8be5995199954db5a7525a7d7e67a"; + let known_cache_key = "plan:0:v2.8.3:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:8ecc6cbc98bab2769e6666a72ba47a4ebd90e6f62256ddcbdc7f352a805e0fe6"; let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); @@ -921,7 +921,7 @@ async fn connection_failure_blocks_startup() { async fn query_planner_redis_update_query_fragments() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_query_fragments.router.yaml"), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:9054d19854e1d9e282ac7645c612bc70b8a7143d43b73d44dade4a5ec43938b4", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:cda2b4e476fdce9c4c435627b26cedd177cfbe04ab335fc3e3d895c0d79d965e", ) .await; } @@ -940,7 +940,7 @@ async fn query_planner_redis_update_planner_mode() { async fn query_planner_redis_update_introspection() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_introspection.router.yaml"), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:04b3051125b5994fba6b0a22b2d8b4246cadc145be030c491a3431655d2ba07a", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:259dd917e4de09b5469629849b91e8ffdfbed2587041fad68b5963369bb13283", ) .await; } @@ -949,7 +949,7 @@ 
async fn query_planner_redis_update_introspection() { async fn query_planner_redis_update_defer() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_defer.router.yaml"), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3b7241b0db2cd878b79c0810121953ba544543f3cb2692aaf1a59184470747b0", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:e4376fe032160ce16399e520c6e815da6cb5cf4dc94a06175b86b64a9bf80201", ) .await; } @@ -960,7 +960,7 @@ async fn query_planner_redis_update_type_conditional_fetching() { include_str!( "fixtures/query_planner_redis_config_update_type_conditional_fetching.router.yaml" ), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0ca695a8c4c448b65fa04229c663f44150af53b184ebdcbb0ad6862290efed76", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:83d899fcb42d2202c39fc8350289b8247021da00ecf3d844553c190c49410507", ) .await; } @@ -971,7 +971,7 @@ async fn query_planner_redis_update_reuse_query_fragments() { include_str!( "fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml" ), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:f7c04319556397ec4b550aa5aaa96c73689cee09026b661b6a9fc20b49e6fa77", + "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:d48f92f892bd67071694c0538a7e657ff8e0c52e1718f475190c17b503e9e8c3", ) .await; } @@ -994,7 +994,7 @@ async fn test_redis_query_plan_config_update(updated_config: &str, new_cache_key 
router.assert_started().await; router.clear_redis_cache().await; - let starting_key = "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:4a5827854a6d2efc85045f0d5bede402e15958390f1073d2e77df56188338e5a"; + let starting_key = "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0966f1528d47cee30b6140a164be16148dd360ee10b87744991e9d35af8e8a27"; router.execute_default_query().await; router.assert_redis_cache_contains(starting_key, None).await; router.update_config(updated_config).await; From 85656489e41d0ff1fedbcc9217b20ccef21b0985 Mon Sep 17 00:00:00 2001 From: Taylor Ninesling Date: Thu, 22 Aug 2024 13:41:25 -0700 Subject: [PATCH 091/108] Make demand control GA (#5868) Co-authored-by: Edward Huang --- .../feat_tninesling_make_demand_control_ga.md | 9 + apollo-router/src/configuration/metrics.rs | 4 +- ...nfiguration__tests__schema_generation.snap | 8 +- .../metrics/demand_control.router.yaml | 2 +- .../enforce_on_execution_request.router.yaml | 2 +- .../enforce_on_execution_response.router.yaml | 2 +- .../enforce_on_subgraph_request.router.yaml | 2 +- .../enforce_on_subgraph_response.router.yaml | 2 +- .../measure_on_execution_request.router.yaml | 2 +- .../measure_on_execution_response.router.yaml | 2 +- .../measure_on_subgraph_request.router.yaml | 2 +- .../measure_on_subgraph_response.router.yaml | 2 +- .../src/plugins/demand_control/mod.rs | 2 +- .../demand_control_delta_filter.router.yaml | 2 +- ...emand_control_result_attribute.router.yaml | 2 +- .../demand_control_result_filter.router.yaml | 2 +- apollo-router/src/router_factory.rs | 2 +- .../src/uplink/license_enforcement.rs | 2 +- ..._test__restricted_features_via_config.snap | 2 +- .../uplink/testdata/restricted.router.yaml | 2 +- apollo-router/tests/apollo_reports.rs | 4 +- .../tests/fixtures/apollo_reports.router.yaml | 2 
+- .../fixtures/apollo_reports_batch.router.yaml | 2 +- .../executing-operations/demand-control.mdx | 717 +++++++----------- 24 files changed, 292 insertions(+), 488 deletions(-) create mode 100644 .changesets/feat_tninesling_make_demand_control_ga.md diff --git a/.changesets/feat_tninesling_make_demand_control_ga.md b/.changesets/feat_tninesling_make_demand_control_ga.md new file mode 100644 index 0000000000..8f99b75478 --- /dev/null +++ b/.changesets/feat_tninesling_make_demand_control_ga.md @@ -0,0 +1,9 @@ +### General Availability (GA) of Demand Control ([PR #5868](https://github.com/apollographql/router/pull/5868)) + +Demand control in the router is now a generally available (GA) feature. + +**GA compatibility update**: if you used demand control during its preview, to use it in GA you must update your configuration from `preview_demand_control` to `demand_control`. + +To learn more, go to [Demand Control](https://www.apollographql.com/docs/router/executing-operations/demand-control/) docs. 
+ +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5868 diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 8cd6b56381..7e18720703 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -378,7 +378,7 @@ impl InstrumentData { populate_config_instrument!( apollo.router.config.demand_control, - "$.preview_demand_control[?(@.enabled == true)]", + "$.demand_control[?(@.enabled == true)]", opt.mode, "$.mode" ); @@ -400,7 +400,7 @@ impl InstrumentData { Self::get_first_key_from_path( demand_control_attributes, "opt.strategy", - "$.preview_demand_control[?(@.enabled == true)].strategy", + "$.demand_control[?(@.enabled == true)].strategy", yaml, ); } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 37a2a352b9..231cef434b 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -8298,6 +8298,10 @@ expression: "&schema" "$ref": "#/definitions/CSRFConfig", "description": "#/definitions/CSRFConfig" }, + "demand_control": { + "$ref": "#/definitions/DemandControlConfig", + "description": "#/definitions/DemandControlConfig" + }, "experimental_apollo_metrics_generation_mode": { "$ref": "#/definitions/ApolloMetricsGenerationMode", "description": "#/definitions/ApolloMetricsGenerationMode" @@ -8351,10 +8355,6 @@ expression: "&schema" "$ref": "#/definitions/Plugins", "description": "#/definitions/Plugins" }, - "preview_demand_control": { - "$ref": "#/definitions/DemandControlConfig", - "description": "#/definitions/DemandControlConfig" - }, "preview_entity_cache": { "$ref": 
"#/definitions/Config6", "description": "#/definitions/Config6" diff --git a/apollo-router/src/configuration/testdata/metrics/demand_control.router.yaml b/apollo-router/src/configuration/testdata/metrics/demand_control.router.yaml index a78a0870ce..c83294d0d0 100644 --- a/apollo-router/src/configuration/testdata/metrics/demand_control.router.yaml +++ b/apollo-router/src/configuration/testdata/metrics/demand_control.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_request.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_request.router.yaml index 43b492c8ad..131a3cc470 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_request.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_request.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: enforce strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_response.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_response.router.yaml index deb3908da5..d3bcba889f 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_response.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_response.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: enforce strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_request.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_request.router.yaml index dc83e08c34..bb77fa7031 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_request.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_request.router.yaml 
@@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: enforce strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_response.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_response.router.yaml index 56fd39e585..8d1a364728 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_response.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_response.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: enforce strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_request.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_request.router.yaml index c96a6908bc..4e2a2f5463 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_request.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_request.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_response.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_response.router.yaml index a7422da35b..6256ca53b4 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_response.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_response.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_request.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_request.router.yaml index c96a6908bc..4e2a2f5463 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_request.router.yaml +++ 
b/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_request.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_response.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_response.router.yaml index a7422da35b..6256ca53b4 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_response.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_response.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/plugins/demand_control/mod.rs b/apollo-router/src/plugins/demand_control/mod.rs index bf0cdf5f26..b3faaef747 100644 --- a/apollo-router/src/plugins/demand_control/mod.rs +++ b/apollo-router/src/plugins/demand_control/mod.rs @@ -411,7 +411,7 @@ impl Plugin for DemandControl { } } -register_plugin!("apollo", "preview_demand_control", DemandControl); +register_plugin!("apollo", "demand_control", DemandControl); #[cfg(test)] mod test { diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml index 09d2948319..5b2e55a772 100644 --- a/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml @@ -1,5 +1,5 @@ # Demand control enabled in measure mode. -preview_demand_control: +demand_control: enabled: true # Use measure mode to monitor the costs of your operations without rejecting any. 
mode: measure diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml index 6dc88e995c..52e2d42dcd 100644 --- a/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml @@ -1,5 +1,5 @@ # Demand control enabled in measure mode. -preview_demand_control: +demand_control: enabled: true # Use measure mode to monitor the costs of your operations without rejecting any. mode: measure diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml index 1b78a1e15e..b01ddf9d81 100644 --- a/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml @@ -1,5 +1,5 @@ # Demand control enabled in measure mode. -preview_demand_control: +demand_control: enabled: true # Use measure mode to monitor the costs of your operations without rejecting any. 
mode: measure diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index e110726712..3d21e8a6bd 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -699,7 +699,7 @@ pub(crate) async fn create_plugins( // This relative ordering is documented in `docs/source/customizations/native.mdx`: add_optional_apollo_plugin!("rhai"); add_optional_apollo_plugin!("coprocessor"); - add_optional_apollo_plugin!("preview_demand_control"); + add_optional_apollo_plugin!("demand_control"); add_user_plugins!(); // Macros above remove from `apollo_plugin_factories`, so anything left at the end diff --git a/apollo-router/src/uplink/license_enforcement.rs b/apollo-router/src/uplink/license_enforcement.rs index 2d77fb3683..743fbbe543 100644 --- a/apollo-router/src/uplink/license_enforcement.rs +++ b/apollo-router/src/uplink/license_enforcement.rs @@ -384,7 +384,7 @@ impl LicenseEnforcementReport { .name("Batching support") .build(), ConfigurationRestriction::builder() - .path("$.preview_demand_control") + .path("$.demand_control") .name("Demand control plugin") .build(), ConfigurationRestriction::builder() diff --git a/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap b/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap index 70f682b0ca..baa48d4a8a 100644 --- a/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap +++ b/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap @@ -55,7 +55,7 @@ Configuration yaml: .preview_file_uploads * Demand control plugin - .preview_demand_control + .demand_control * Apollo metrics extended references .telemetry.apollo.experimental_apollo_metrics_reference_mode diff --git 
a/apollo-router/src/uplink/testdata/restricted.router.yaml b/apollo-router/src/uplink/testdata/restricted.router.yaml index 67c50cac7d..b354a9a239 100644 --- a/apollo-router/src/uplink/testdata/restricted.router.yaml +++ b/apollo-router/src/uplink/testdata/restricted.router.yaml @@ -92,7 +92,7 @@ preview_file_uploads: enabled: true mode: stream -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/tests/apollo_reports.rs b/apollo-router/tests/apollo_reports.rs index a46f303188..006c3d8e97 100644 --- a/apollo-router/tests/apollo_reports.rs +++ b/apollo-router/tests/apollo_reports.rs @@ -92,10 +92,10 @@ async fn config( Some(serde_json::Value::Bool(use_legacy_request_span)) }) .expect("Could not sub in endpoint"); - config = jsonpath_lib::replace_with(config, "$.preview_demand_control.enabled", &mut |_| { + config = jsonpath_lib::replace_with(config, "$.demand_control.enabled", &mut |_| { Some(serde_json::Value::Bool(demand_control)) }) - .expect("Could not sub in preview_demand_control"); + .expect("Could not sub in demand_control"); config = jsonpath_lib::replace_with( config, diff --git a/apollo-router/tests/fixtures/apollo_reports.router.yaml b/apollo-router/tests/fixtures/apollo_reports.router.yaml index 81bcf4cd49..644e286ee7 100644 --- a/apollo-router/tests/fixtures/apollo_reports.router.yaml +++ b/apollo-router/tests/fixtures/apollo_reports.router.yaml @@ -3,7 +3,7 @@ include_subgraph_errors: rhai: scripts: tests/fixtures main: test_callbacks.rhai -preview_demand_control: +demand_control: mode: measure enabled: false strategy: diff --git a/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml b/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml index 387085b17e..e60791ebbc 100644 --- a/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml +++ b/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml @@ -6,7 +6,7 @@ rhai: main: test_callbacks.rhai include_subgraph_errors: 
all: true -preview_demand_control: +demand_control: mode: measure enabled: false strategy: diff --git a/docs/source/executing-operations/demand-control.mdx b/docs/source/executing-operations/demand-control.mdx index f2c5657a22..0757cb9eae 100644 --- a/docs/source/executing-operations/demand-control.mdx +++ b/docs/source/executing-operations/demand-control.mdx @@ -7,523 +7,210 @@ minVersion: 1.48.0 - +## What is demand control? -The Demand Control feature is in [preview](/resources/product-launch-stages/#product-launch-stages) for organizations with an Enterprise plan. Get in touch with your Apollo contact to request access. +Demand control provides a way to secure your supergraph from overly complex operations, based on the [IBM GraphQL Cost Directive specification](https://ibm.github.io/graphql-specs/cost-spec.html). -We welcome your feedback during the preview, especially feedback about the following: +Application clients can send overly costly operations that overload your supergraph infrastructure. These operations may be costly due to their complexity and/or their need for expensive resolvers. In either case, demand control can help you protect your infrastructure from these expensive operations. When your router receives a request, it calculates a cost for that operation. If the cost is greater than your configured maximum, the operation is rejected. -
+## Calculating cost -- Whether the available tools are sufficient to enable you to understand how users are querying your supergraph. +When calculating the cost of an operation, the router sums the costs of the sub-requests that it plans to send to your subgraphs. +* For each operation, the cost is the sum of its base cost plus the costs of its fields. +* For each field, the cost is defined recursively as its own base cost plus the cost of its selections. In the IBM specification, this is called [field cost](https://ibm.github.io/graphql-specs/cost-spec.html#sec-Field-Cost). -- Whether the demand control workflow is easy to follow and implement. - -- Whether any features are missing that preclude you from using demand control in production. - -
- -Protect your graph from malicious or demanding clients with GraphOS Router's demand control features. Estimate, calculate, observe, and reject high cost GraphQL operations. - -## About demand control - -Applications clients can send complex operations through your router that can overload your supergraph's infrastructure. The clients may be unintentionally or maliciously overloading your supergraph. - -When a client makes a request to the router, the router makes requests to your subgraphs to gather data for the final response. A client, however, may send an operation that's too complex for your subgraphs to process without degrading performance. - -Complex operations include operations that are deeply nested or have many results. Too many complex operations might overload your subgraphs and degrade the responsiveness and latency of your supergraph. - -To prevent complex operations from degrading performance, the GraphOS Router supports analyzing and rejecting requests based on operation complexity. Like [safelisting operations with persisted query lists (PQL)](/graphos/operations/persisted-queries), demand control enables you to reject operations that you don't want to be served by your graph. - -With demand control configured, the router computes a complexity value, or _cost_, per operation. You can collect telemetry and metrics to determine the range of costs of operations served by the router. You can then configure a maximum cost limit per operation, above which the router rejects the operation. - -## Demand control workflow - -Follow this workflow to configure and tune demand control for your router: - -1. Measure the cost of your existing operations. -2. Improve the cost estimation model. -3. Adjust your `preview_demand_control` configuration and enforce cost limits. - -### Measure cost of existing operations - -Start by measuring the costs of the operations served by your router. - -1. 
In your `router.yaml`, configure demand control to `measure` mode and define telemetry to monitor the results. For example: - - Set `preview_demand_control.mode` to `measure`. - - Define a custom histogram of operation costs. - -```yaml title="Example router.yaml to measure operation costs" -# Demand control enabled in measure mode. -preview_demand_control: - enabled: true - # Use measure mode to monitor the costs of your operations without rejecting any. - mode: measure - - strategy: - # Static estimated strategy has a fixed cost for elements. - static_estimated: - # The assumed returned list size for operations. Set this to the maximum number of items in a GraphQL list - list_size: 10 - # The maximum cost of a single operation, above which the operation is rejected. - max: 1000 - -# Basic telemetry configuration for cost. -telemetry: - exporters: - metrics: - common: - views: - # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry - - name: cost.* - aggregation: - histogram: - buckets: - - 0 - - 10 - - 100 - - 1000 - - 10000 - - 100000 - - 1000000 - - # Example configured for Prometheus. Customize for your APM. 
- prometheus: - enabled: true +The cost of each operation type: - # Basic instrumentation - instrumentation: - instruments: - supergraph: - cost.actual: true # The actual cost - cost.estimated: # The estimated cost - attributes: - cost.result: true # Of the estimated costs which of these would have been rejected - cost.delta: true # Actual - estimated +| | Mutation | Query | Subscription | +| ---- | -------- | ----- | ------------ | +| type | 10 | 0 | 0 | -``` +The cost of each GraphQL element type, per operation type: - +| | Mutation | Query | Subscription | +| --------- | -------- | ----- | ------------ | +| Object | 1 | 1 | 1 | +| Interface | 1 | 1 | 1 | +| Union | 1 | 1 | 1 | +| Scalar | 0 | 0 | 0 | +| Enum | 0 | 0 | 0 | -When analyzing the costs of operations, if your histograms are not granular enough or don't cover a sufficient range, you can modify the views in your telemetry configuration: +Using these defaults, the following operation would have a cost of 4. -```yaml -telemetry: - exporters: - metrics: - common: - views: - - name: cost.* - aggregation: - histogram: - buckets: - - 0 # Define the buckets here - - 10 - - 100 - - 1000 # More granularity for costs in the 1000s - - 2000 - - 3000 - - 4000 +```graphql +query BookQuery { + book(id: 1) { + title + author { + name + } + publisher { + name + address { + zipCode + } + } + } +} ``` - - -2. Send some requests through your router and observe the `cost.*` metrics via your APM. - -You should be able to configure your APM to look for `cost.*` histograms and get the proportion of requests that would be rejected via the `cost.result` attribute on the `cost.estimated` total. This will allow you to see histograms of cost. 
- -An example histogram of operation costs from a Prometheus endpoint: + ```text disableCopy=true showLineNumbers=false -# TYPE cost_actual histogram -cost_actual_bucket{otel_scope_name="apollo/router",le="0"} 0 -cost_actual_bucket{otel_scope_name="apollo/router",le="10"} 3 -cost_actual_bucket{otel_scope_name="apollo/router",le="100"} 5 -cost_actual_bucket{otel_scope_name="apollo/router",le="1000"} 11 -cost_actual_bucket{otel_scope_name="apollo/router",le="10000"} 19 -cost_actual_bucket{otel_scope_name="apollo/router",le="100000"} 20 -cost_actual_bucket{otel_scope_name="apollo/router",le="1000000"} 20 -cost_actual_bucket{otel_scope_name="apollo/router",le="+Inf"} 20 -cost_actual_sum{otel_scope_name="apollo/router"} 1097 -cost_actual_count{otel_scope_name="apollo/router"} 20 -# TYPE cost_delta histogram -cost_delta_bucket{otel_scope_name="apollo/router",le="0"} 0 -cost_delta_bucket{otel_scope_name="apollo/router",le="10"} 2 -cost_delta_bucket{otel_scope_name="apollo/router",le="100"} 9 -cost_delta_bucket{otel_scope_name="apollo/router",le="1000"} 7 -cost_delta_bucket{otel_scope_name="apollo/router",le="10000"} 19 -cost_delta_bucket{otel_scope_name="apollo/router",le="100000"} 20 -cost_delta_bucket{otel_scope_name="apollo/router",le="1000000"} 20 -cost_delta_bucket{otel_scope_name="apollo/router",le="+Inf"} 20 -cost_delta_sum{otel_scope_name="apollo/router"} 21934 -cost_delta_count{otel_scope_name="apollo/router"} 1 -# TYPE cost_estimated histogram -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="0"} 0 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="10"} 5 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="100"} 5 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="1000"} 9 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="10000"} 11 
-cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="100000"} 20 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="1000000"} 20 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="+Inf"} 20 -cost_estimated_sum{cost_result="COST_OK",otel_scope_name="apollo/router"} -cost_estimated_count{cost_result="COST_OK",otel_scope_name="apollo/router"} 20 +1 Query (0) + 1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (1) = 4 total cost ``` -An example chart of a histogram: - - - - -You can also chart the percentage of operations that would be allowed or rejected with the current configuration: - - - -Although estimated costs won't necessarily match actual costs, you can use the metrics to ascertain the following: -- Whether any operations have underestimated costs -- What to set `static_estimated.list_size` as the actual maximum list size -- What to set `static_estimated.max` as the maximum cost of an allowed operation - -In this example, just under half of the requests would be rejected with the current configuration. The cost of queries are also underestimated because `cost.delta` is non-zero. - -3. To figure out what operations are being rejected, define a telemetry custom instrument that reports when an operation has been rejected because its cost exceeded the configured cost limit: - -```yaml title="router.yaml" -telemetry: - instrumentation: - instruments: - supergraph: - # custom instrument - cost.rejected.operations: - type: histogram - value: - # Estimated cost is used to populate the histogram - cost: estimated - description: "Estimated cost per rejected operation." - unit: delta - condition: - eq: - # Only show rejected operations. 
- - cost: result - - "COST_ESTIMATED_TOO_EXPENSIVE" - attributes: - graphql.operation.name: true # Graphql operation name is added as an attribute - -``` - -This custom instrument may not be suitable when you have many operation names, such as a public internet-facing API. You can add conditions to reduce the number of returned operations. For example, use a condition that outputs results only when the cost delta is greater than a threshold: - -```yaml title="router.yaml" -telemetry: - instrumentation: - instruments: - supergraph: - # custom instrument - cost.rejected.operations: - type: histogram - value: - # Estimated cost is used to populate the histogram - cost: estimated - description: "Estimated cost per rejected operation." - unit: delta - condition: - all: - - eq: # Only show rejected operations - - cost: result - - "COST_ESTIMATED_TOO_EXPENSIVE" -#highlight-start - - gt: # Only show cost delta > 100 - - cost: delta - - 100 -#highlight-end -``` + -4. You should now be able to configure your APM to see which operations are too costly. Visualizing the histogram can be useful, such as with top-N or heatmap tools. +### Customizing cost -For example, the following table has the estimated cost of operations: +Since version 1.53.0, the router supports customizing the cost calculation with the `@cost` directive. The `@cost` directive has a single argument, `weight`, which overrides the default weights from the table above. -| Operation name | Estimated cost | -|----------------------|----------------| -| `ExtractAll` | 9020 | -| `GetAllProducts` | 1435 | -| `GetLatestProducts` | 120 | -| `GetRecentlyUpdated` | 99 | -| `FindProductByName` | 87 | + -The `ExtractAll` operation has a very large estimated cost, so it's a good candidate to be rejected. +The Apollo Federation [`@cost` directive](/federation/federated-schemas/federated-directives/#cost) differs from the IBM specification in that the `weight` argument is of type `Int!` instead of `String!`. 
-Also, the value of the `cost.delta` metric—the difference between the actual and estimated cost—shows whether the assumed list size used for cost estimation is too large or small. In this example, the positive `cost.delta` means that the actual list size is greater than the estimated list size. Therefore the `static_estimated.list_size` can be reduced to closer match the actual. + -### Improve cost estimation model +Annotating your schema with the `@cost` directive customizes how the router scores operations. For example, imagine that the `Address` resolver for an example query is particularly expensive. We can annotate the schema with the `@cost` directive with a larger weight: -You should iteratively improve your cost estimation model. Accurate cost estimation is critical to identifying and preventing queries that could harm your subgraphs. +```graphql +type Query { + book(id: ID): Book +} -The previous step identified a noticeable difference between actual and estimated costs with the example operations. You can better understand the difference—and consequently tune the configured list size—by adding telemetry instruments for fields in your GraphQL schema. +type Book { + title: String + author: Author + publisher: Publisher +} -For example, you can generate a histogram for every field in your GraphQL schema: +type Author { + name: String +} -```yaml title="router.yaml" -telemetry: - exporters: - metrics: - common: - views: - - name: graphql.* - aggregation: - histogram: - buckets: - - 0 - - 10 - - 100 - - 1000 - - 10000 - - 100000 - - 1000000 - instrumentation: - instruments: - graphql: - list.length: true +type Publisher { + name: String + address: Address +} +type Address + @cost(weight: 5) { #highlight-line + zipCode: Int! +} ``` -This configuration generates many metrics and may be too costly for your APM. To reduce the amount of metrics generated, you can set conditions on the instrument. 
- -For this example, you can set a condition that restricts the instrument to an operation with a certain name. You can also show only histograms of list sizes of GraphQL fields: +This increases the cost of `BookQuery` from 4 to 8. -```yaml title="router.yaml" -telemetry: - instrumentation: - instruments: - graphql: - graphql.list.length.restricted: # custom instrument - unit: length - description: "histogram of list lengths" - type: histogram - value: - list_length: value - condition: - all: - - eq: - - operation_name: string - - "GetAllProducts" -``` - -The output from a Prometheus endpoint may look like the following: + ```text disableCopy=true showLineNumbers=false -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="0"} 0 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="10"} 9 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="100"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="1000"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="10000"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="100000"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="1000000"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="+Inf"} 20 -graphql_list_length_restricted_sum{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router"} 218 
-graphql_list_length_restricted_count{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router"} 20 +1 Query (0) + 1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (5) = 8 total cost ``` -You can configure your APM to chart the histogram: - - - -The chart shows that the actual list sizes for the `allProducts` field are at most 100, so you should update your `static_estimated.list_size` to be 100: - -```yaml title="router.yaml" -preview_demand_control: - enabled: true - mode: measure - strategy: - static_estimated: - list_size: 100 # Updated to measured actual max list size - max: 1000 -``` - -Rerunning the router and remeasuring costs with the updated `static_estimated.list_size` should result in new histograms and percentages of rejected operations. For example: - - - - - - -Although there are no more cost deltas reported, the estimated costs have increased. You still have to adjust the maximum cost. - -Looking at the top N operations, you may see that the estimated costs have been updated. For example: - -| Operation name | Estimated cost | -|----------------------|----------------| -| `ExtractAll` | 390200 | -| `GetAllProducts` | 44350 | -| `GetLatestProducts` | 11200 | -| `GetRecentlyUpdated` | 4990 | -| `FindProductByName` | 1870 | - -All operations except `ExtractAll` are in a range of acceptable costs. - - + -#### `@listSize` +### Handling list fields - +During the static analysis phase of demand control, the router doesn't know the size of the list fields in a given query. It must use estimates for list sizes. The closer the estimated list size is to the actual list size for a field, the closer the estimated cost will be to the actual cost. -If some of your fields have list sizes that significantly differ from `static_estimated.list_size`, you can provide the router with more information. 
+ -The `@listSize` directive can be configured in multiple ways: +The difference between estimated and actual operation cost calculations is due only to the difference between assumed and actual sizes of list fields. -1. Use the `assumedSize` argument to define a static size for a field. -2. Use `slicingArguments` to indicate that a field's size is dynamically controlled by one or more of its arguments. This works well if some of the arguments are paging parameters. + -Learn more about the `@listSize` directive [here](/federation/federated-schemas/federated-directives/#listsize). +There are two ways to indicate the expected list sizes to the router: +* Set the global maximum in your router configuration file (see [Configuring demand control](#configuring-demand-control)). -### Enforce cost limits +* Use the Apollo Federation [@listSize directive](/federation/federated-schemas/federated-directives/#listsize). -After determining the cost estimation model of your operations, you should update and enforce the new cost limits. +The `@listSize` directive supports field-level granularity in setting list size. By using its `assumedSize` argument, you can set a statically defined list size for a field. If you are using paging parameters which control the size of the list, use the `slicingArguments` argument. -From the previous step, you can set the maximum cost to a value that allows all operations except `ExtractAll`: +Continuing with our example above, let's add two queryable fields. 
First, we will add a field which returns the top five best selling books: -```yaml title="router.yaml" -preview_demand_control: - enabled: true - mode: enforce # Change mode from measure to enforce - strategy: - static_estimated: - list_size: 100 - max: 50000 # Updated max cost allows all operations except ExtractAll +```graphql +type Query { + book(id: ID): Book + bestsellers: [Book] @listSize(assumedSize: 5) +} ``` -## Next steps - - -Continue to monitor the costs of operations and take action if the estimation model becomes inaccurate. For example, update the estimation model if the maximum number of list items changes. - -You can set alerts in your APM for events that may require changing your demand control settings. Events to alert include: -- Unexpected increase in the number of requests rejected by demand control. -- Increased max list size of your data. -- Increased delta metric. - - - -Using paging APIs can help avoid situations where a list field returns an arbitrarily large number of elements. - - - -## Calculating operation cost - -When your router receives a request, its query planner generates and sends a series of sub-requests to subgraphs. - -To calculate the total cost of an operation, the router sums the total costs based on sub-request's operation type and the types of GraphQL elements of its fields. 
- -The cost of each operation type: - -| | Mutation | Query | Subscription | -| --------- | -------- | ----- | ------------ | -| type | 10 | 0 | 0 | - - -The cost of each GraphQL element type, per operation type: - -| | Mutation | Query | Subscription | -| --------- | -------- | ----- | ------------ | -| Object | 1 | 1 | 1 | -| Interface | 1 | 1 | 1 | -| Union | 1 | 1 | 1 | -| Scalar | 0 | 0 | 0 | -| Enum | 0 | 0 | 0 | - -For example, assume the following query gets a response with six products and ten reviews: +With this schema, the following query has a cost of 40: ```graphql -query ExampleQuery { - topProducts { - name - reviews { - author { - name +query BestsellersQuery { + bestsellers { + title + author { + name + } + publisher { + name + address { + zipCode } } } } ``` -Assuming each review having exactly one author, the total cost of the query is 26. - - + ```text disableCopy=true showLineNumbers=false -1 Query (0 cost) + 6 product objects (6) + 6 name scalars (0) + 10 review objects (10) + 10 author objects (10) + 10 name scalars (0) = 26 total cost +1 Query (0) + 5 book objects (5 * (1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (5))) = 40 total cost ``` - - -#### `@cost` - - - -You can further customize the cost calculation with the `@cost` directive. This directive takes a `weight` argument which replaces the default weights outlined above. - -Revisiting the products query above, if the `topProducts.name` field is annotated with `@cost(weight: 5)`, then the total cost of the query increases to 56. - - +The second field we will add is a paginated resolver. It returns the latest additions to the inventory: ```graphql type Query { - topProducts: [Product] -} - -type Product { - name: String! 
@cost(weight: 5) - reviews: [Review] + book(id: ID): Book + bestsellers: [Book] @listSize(assumedSize: 5) + #highlight-start + newestAdditions(after: ID, limit: Int!): [Book] + @listSize(slicingArguments: ["limit"]) + #highlight-end } +``` -type Review { - author: Author! -} +The number of books returned by this resolver is determined by the `limit` argument. -type Author { - name: String! +```graphql +query NewestAdditions { + newestAdditions(limit: 3) { + title + author { + name + } + publisher { + name + address { + zipCode + } + } + } } ``` - - - +The router will estimate the cost of this query as 24. If the limit was increased to 7, then the cost would increase to 56. ```text disableCopy=true showLineNumbers=false -1 Query (0 cost) + 6 product objects (6) + 6 name scalars (30) + 10 review objects (10) + 10 author objects (10) + 10 name scalars (0) = 56 total cost -``` - - - -Learn more about the `@cost` directive [here](/federation/federated-schemas/federated-directives/#cost). - -### Estimated and actual costs - -For an operation with list fields, the router must run the operation to get the actual number of items in its lists. Without actual list sizes, the cost of an operation can only be estimated before it's executed, where you assume the size of lists. - -After an operation is executed, the actual cost per operation can be calculated with the actual list sizes. - - - -The difference between estimated and actual operation cost calculations is due only to the difference between assumed and actual sizes of list fields. - - +When requesting 3 books: +1 Query (0) + 3 book objects (3 * (1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (5))) = 24 total cost -### Measurement and enforcement modes - -When rolling out demand control, you first need to gather information about the queries that are already being executed against your graph so you can decide when to reject requests. 
-
-The router's demand control features support a measurement mode that enables you to gather this information without impacting your running services. You can define telemetry instruments to monitor your operations and decide on their maximum cost threshold.
-
-After gathering enough data, you can then configure your router with maximum cost and list size limits and set demand control to enforcement mode, where it rejects operations with costs exceeding the limit.
+When requesting 7 books:
+1 Query (0) + 7 book objects (7 * (1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (5))) = 56 total cost
+```

 ## Configuring demand control

-To enable demand control in the router, configure the `preview_demand_control` option in `router.yaml`:
+To enable demand control in the router, configure the `demand_control` option in `router.yaml`:

 ```yaml title="router.yaml"
-preview_demand_control:
+demand_control:
   enabled: true
   mode: measure
   strategy:
@@ -532,18 +219,19 @@ preview_demand_control:
       max: 1000
 ```

-When `preview_demand_control` is enabled, the router measures the cost of each operation and can enforce operation cost limits, based on additional configuration.
+When `demand_control` is enabled, the router measures the cost of each operation and can enforce operation cost limits, based on additional configuration.

-Customize `preview_demand_control` with the following settings:
+Customize `demand_control` with the following settings:

-| Option | Valid values | Default value | Description |
-| ------------------- | ----------------------- | ------------- | ---------------------------------------------------------------------------------------------------- |
-| `enabled` | boolean | `false` | Set `true` to measure operation costs or enforce operation cost limits. |
-| `mode` | `measure`, `enforce` | -- | - `measure` collects information about the cost of operations.
- `enforce` rejects operations exceeding configured cost limits | -| `strategy` | `static_estimated` | -- | `static_estimated` estimates the cost of an operation before it is sent to a subgraph | -| `static_estimated.list_size` | integer | -- | The assumed maximum size of a list for fields that return lists. | -| `static_estimated.max` | integer | -- | The maximum cost of an accepted operation. An operation with a higher cost than this is rejected. | +| Option | Valid values | Default value | Description | +| ---------------------------- | -------------------- | ------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `enabled` | boolean | `false` | Set `true` to measure operation costs or enforce operation cost limits. | +| `mode` | `measure`, `enforce` | -- | - `measure` collects information about the cost of operations.
- `enforce` rejects operations exceeding configured cost limits | +| `strategy` | `static_estimated` | -- | `static_estimated` estimates the cost of an operation before it is sent to a subgraph | +| `static_estimated.list_size` | integer | -- | The assumed maximum size of a list for fields that return lists. | +| `static_estimated.max` | integer | -- | The maximum cost of an accepted operation. An operation with a higher cost than this is rejected. | +When enabling `demand_control` for the first time, set it to `measure` mode. This will allow you to observe the cost of your operations before setting your maximum cost. ## Telemetry for demand control @@ -563,30 +251,29 @@ You can define router telemetry to gather cost information and gain insights int | Instrument | Description | | ---------------- | ---------------------------------------------------------- | -| `cost.actual` | The actual cost of an operation, measured after execution. | -| `cost.estimated` | The estimated cost of an operation before execution. | -| `cost.delta` | The difference between the actual and estimated cost. | +| `cost.actual` | The actual cost of an operation, measured after execution. | +| `cost.estimated` | The estimated cost of an operation before execution. | +| `cost.delta` | The difference between the actual and estimated cost. | ### Attributes Attributes for `cost` can be applied to instruments, spans, and events—anywhere `supergraph` attributes are used. -| Attribute | Value | Description | -| --------------- | ----- | ---------------------------------------------------------- | -| `cost.actual` | boolean | The actual cost of an operation, measured after execution. | -| `cost.estimated` | boolean | The estimated cost of an operation before execution. | -| `cost.delta` | boolean | The difference between the actual and estimated cost. | -| `cost.result` | boolean | The return code of the cost calculation. 
`COST_OK` or an [error code](../errors/#demand-control) | +| Attribute | Value | Description | +| ---------------- | ------- | ------------------------------------------------------------------------------------------------ | +| `cost.actual` | boolean | The actual cost of an operation, measured after execution. | +| `cost.estimated` | boolean | The estimated cost of an operation before execution. | +| `cost.delta` | boolean | The difference between the actual and estimated cost. | +| `cost.result` | boolean | The return code of the cost calculation. `COST_OK` or an [error code](../errors/#demand-control) | ### Selectors Selectors for `cost` can be applied to instruments, spans, and events—anywhere `supergraph` attributes are used. -| Key | Value | Default | Description | -| ---- | ---------- | ------- | -------------------------------------------------- | +| Key | Value | Default | Description | +| ------ | ---------------------------------------- | ------- | ----------------------------------------------------------------- | | `cost` | `estimated`, `actual`, `delta`, `result` | | The estimated, actual, or delta cost values, or the result string | - ### Examples #### Example instrument @@ -638,3 +325,111 @@ telemetry: graphql.operation.name: true cost.delta: true ``` + +#### Filtering by cost result + +In router telemetry, you can customize instruments that filter their output based on cost results. + +For example, you can record the estimated cost when `cost.result` is `COST_ESTIMATED_TOO_EXPENSIVE`: + +```yaml title="router.yaml" +telemetry: + instrumentation: + instruments: + supergraph: + # custom instrument + cost.rejected.operations: + type: histogram + value: + # Estimated cost is used to populate the histogram + cost: estimated + description: "Estimated cost per rejected operation." + unit: delta + condition: + eq: + # Only show rejected operations. 
+ - cost: result + - "COST_ESTIMATED_TOO_EXPENSIVE" + attributes: + graphql.operation.name: true # Graphql operation name is added as an attribute +``` + +### Configuring instrument output + +When analyzing the costs of operations, if your histograms are not granular enough or don't cover a sufficient range, you can modify the views in your telemetry configuration: + +```yaml +telemetry: + exporters: + metrics: + common: + views: + # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry + - name: cost.* + aggregation: + histogram: + buckets: + - 0 + - 10 + - 100 + - 1000 + - 10000 + - 100000 + - 1000000 +``` + + + +```text disableCopy=true showLineNumbers=false +# TYPE cost_actual histogram +cost_actual_bucket{otel_scope_name="apollo/router",le="0"} 0 +cost_actual_bucket{otel_scope_name="apollo/router",le="10"} 3 +cost_actual_bucket{otel_scope_name="apollo/router",le="100"} 5 +cost_actual_bucket{otel_scope_name="apollo/router",le="1000"} 11 +cost_actual_bucket{otel_scope_name="apollo/router",le="10000"} 19 +cost_actual_bucket{otel_scope_name="apollo/router",le="100000"} 20 +cost_actual_bucket{otel_scope_name="apollo/router",le="1000000"} 20 +cost_actual_bucket{otel_scope_name="apollo/router",le="+Inf"} 20 +cost_actual_sum{otel_scope_name="apollo/router"} 1097 +cost_actual_count{otel_scope_name="apollo/router"} 20 +# TYPE cost_delta histogram +cost_delta_bucket{otel_scope_name="apollo/router",le="0"} 0 +cost_delta_bucket{otel_scope_name="apollo/router",le="10"} 2 +cost_delta_bucket{otel_scope_name="apollo/router",le="100"} 9 +cost_delta_bucket{otel_scope_name="apollo/router",le="1000"} 7 +cost_delta_bucket{otel_scope_name="apollo/router",le="10000"} 19 +cost_delta_bucket{otel_scope_name="apollo/router",le="100000"} 20 +cost_delta_bucket{otel_scope_name="apollo/router",le="1000000"} 20 +cost_delta_bucket{otel_scope_name="apollo/router",le="+Inf"} 20 +cost_delta_sum{otel_scope_name="apollo/router"} 21934 
+cost_delta_count{otel_scope_name="apollo/router"} 1 +# TYPE cost_estimated histogram +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="0"} 0 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="10"} 5 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="100"} 5 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="1000"} 9 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="10000"} 11 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="100000"} 20 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="1000000"} 20 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="+Inf"} 20 +cost_estimated_sum{cost_result="COST_OK",otel_scope_name="apollo/router"} +cost_estimated_count{cost_result="COST_OK",otel_scope_name="apollo/router"} 20 +``` + + + +An example chart of a histogram: + + + +You can also chart the percentage of operations that would be allowed or rejected with the current configuration: + + From 8ca4d925ee5de67ebf901dcd1f74b3177ed8d609 Mon Sep 17 00:00:00 2001 From: bryn Date: Thu, 22 Aug 2024 17:14:13 +0100 Subject: [PATCH 092/108] Update router bridge to 0.6.0-beta.0+v2.9.0-beta.0 --- Cargo.lock | 28 ++++++++++++++++++++++-- apollo-router/Cargo.toml | 2 +- apollo-router/tests/integration/redis.rs | 14 ++++++------ 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55ca449bc6..bc2c65d358 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -335,7 +335,7 @@ dependencies = [ "reqwest", "rhai", "rmp", - "router-bridge", + "router-bridge 0.6.0-beta.0+v2.9.0-beta.0", "rowan", "rstack", "rust-embed", @@ -5803,6 +5803,30 @@ dependencies = [ "which", ] +[[package]] +name = "router-bridge" +version = "0.6.0-beta.0+v2.9.0-beta.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4c862e7d7dd07e68e6b8f6bf110a0169ed6cc9709a4399df59329db135737f8d" +dependencies = [ + "anyhow", + "async-channel 1.9.0", + "deno_console", + "deno_core", + "deno_url", + "deno_web", + "deno_webidl", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tower-service", + "tracing", + "which", +] + [[package]] name = "router-fuzz" version = "0.0.0" @@ -5818,7 +5842,7 @@ dependencies = [ "libfuzzer-sys", "log", "reqwest", - "router-bridge", + "router-bridge 0.5.30+v2.8.3", "schemars", "serde", "serde_json", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index dbf481bf5a..86f032faa5 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -198,7 +198,7 @@ regex = "1.10.5" reqwest.workspace = true # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.5.30+v2.8.3" +router-bridge = "=0.6.0-beta.0+v2.9.0-beta.0" rust-embed = { version = "8.4.0", features = ["include-exclude"] } rustls = "0.21.12" diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index 6fb3b5d381..329c2962bc 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -26,7 +26,7 @@ async fn query_planner_cache() -> Result<(), BoxError> { // 2. run `docker compose up -d` and connect to the redis container by running `docker-compose exec redis /bin/bash`. // 3. Run the `redis-cli` command from the shell and start the redis `monitor` command. // 4. Run this test and yank the updated cache key from the redis logs. 
- let known_cache_key = "plan:0:v2.8.3:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:8ecc6cbc98bab2769e6666a72ba47a4ebd90e6f62256ddcbdc7f352a805e0fe6"; + let known_cache_key = "plan:0:v2.9.0-beta.0:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:8ecc6cbc98bab2769e6666a72ba47a4ebd90e6f62256ddcbdc7f352a805e0fe6"; let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); @@ -921,7 +921,7 @@ async fn connection_failure_blocks_startup() { async fn query_planner_redis_update_query_fragments() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_query_fragments.router.yaml"), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:cda2b4e476fdce9c4c435627b26cedd177cfbe04ab335fc3e3d895c0d79d965e", + "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:cda2b4e476fdce9c4c435627b26cedd177cfbe04ab335fc3e3d895c0d79d965e", ) .await; } @@ -940,7 +940,7 @@ async fn query_planner_redis_update_planner_mode() { async fn query_planner_redis_update_introspection() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_introspection.router.yaml"), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:259dd917e4de09b5469629849b91e8ffdfbed2587041fad68b5963369bb13283", + "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:259dd917e4de09b5469629849b91e8ffdfbed2587041fad68b5963369bb13283", ) .await; } 
@@ -949,7 +949,7 @@ async fn query_planner_redis_update_introspection() { async fn query_planner_redis_update_defer() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_defer.router.yaml"), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:e4376fe032160ce16399e520c6e815da6cb5cf4dc94a06175b86b64a9bf80201", + "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:e4376fe032160ce16399e520c6e815da6cb5cf4dc94a06175b86b64a9bf80201", ) .await; } @@ -960,7 +960,7 @@ async fn query_planner_redis_update_type_conditional_fetching() { include_str!( "fixtures/query_planner_redis_config_update_type_conditional_fetching.router.yaml" ), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:83d899fcb42d2202c39fc8350289b8247021da00ecf3d844553c190c49410507", + "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:83d899fcb42d2202c39fc8350289b8247021da00ecf3d844553c190c49410507", ) .await; } @@ -971,7 +971,7 @@ async fn query_planner_redis_update_reuse_query_fragments() { include_str!( "fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml" ), - "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:d48f92f892bd67071694c0538a7e657ff8e0c52e1718f475190c17b503e9e8c3", + "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:d48f92f892bd67071694c0538a7e657ff8e0c52e1718f475190c17b503e9e8c3", ) .await; } @@ -994,7 +994,7 @@ async fn 
test_redis_query_plan_config_update(updated_config: &str, new_cache_key router.assert_started().await; router.clear_redis_cache().await; - let starting_key = "plan:0:v2.8.3:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0966f1528d47cee30b6140a164be16148dd360ee10b87744991e9d35af8e8a27"; + let starting_key = "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0966f1528d47cee30b6140a164be16148dd360ee10b87744991e9d35af8e8a27"; router.execute_default_query().await; router.assert_redis_cache_contains(starting_key, None).await; router.update_config(updated_config).await; From b9ef5119ec6c0f2469f0a1ea1846af27b3009ed2 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Thu, 22 Aug 2024 21:15:39 -0700 Subject: [PATCH 093/108] Update docs and changelog to reflect new federation 2.9.0. --- .changesets/feat_tarmac_peanuts_brandy_reverse.md | 7 +++++++ docs/source/federation-version-support.mdx | 10 +++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 .changesets/feat_tarmac_peanuts_brandy_reverse.md diff --git a/.changesets/feat_tarmac_peanuts_brandy_reverse.md b/.changesets/feat_tarmac_peanuts_brandy_reverse.md new file mode 100644 index 0000000000..1b87851a52 --- /dev/null +++ b/.changesets/feat_tarmac_peanuts_brandy_reverse.md @@ -0,0 +1,7 @@ +### REMINDER: ADD CHANGELOG NOTICE FOR CACHE BUMP FOR FEDERATION UPGRADE + +THIS CHANGELOG SHOULD NOT BE IN THE FINAL RELEASE. THIS IS A REMINDER TO PUT THE BANNER AT THE TOP OF THE CHANGELOG DURING FINALIZATION. + +IF YOU ARE REVIEWING THIS CHANGESET, CALL THIS OUT! 
:) + +By [@abernix](https://github.com/abernix) \ No newline at end of file diff --git a/docs/source/federation-version-support.mdx b/docs/source/federation-version-support.mdx index 195e9b0018..2574ec0ee1 100644 --- a/docs/source/federation-version-support.mdx +++ b/docs/source/federation-version-support.mdx @@ -37,7 +37,15 @@ The table below shows which version of federation each router release is compile - v1.49.1 and later (see latest releases) + v1.53.0 and later (see latest releases) + + + 2.9.0 + + + + + v1.49.1 - v1.52.1 2.8.1 From da8691efae0f97547044b50704681c81f945c1fb Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Thu, 22 Aug 2024 21:17:32 -0700 Subject: [PATCH 094/108] prep release: v1.53.0-rc.0 --- Cargo.lock | 8 +- apollo-federation/Cargo.toml | 2 +- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.template.toml | 2 +- .../templates/base/xtask/Cargo.template.toml | 2 +- apollo-router/Cargo.toml | 4 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 +- helm/chart/router/README.md | 9 +- licenses.html | 271 +----------------- scripts/install.sh | 2 +- 14 files changed, 31 insertions(+), 283 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bc2c65d358..568d9ee074 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -178,7 +178,7 @@ dependencies = [ [[package]] name = "apollo-federation" -version = "1.52.0" +version = "1.53.0-rc.0" dependencies = [ "apollo-compiler", "derive_more", @@ -229,7 +229,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.52.0" +version = "1.53.0-rc.0" dependencies = [ "access-json", "ahash", @@ -398,7 +398,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.52.0" +version = "1.53.0-rc.0" dependencies = [ "apollo-parser", "apollo-router", @@ -414,7 +414,7 @@ dependencies = [ 
[[package]] name = "apollo-router-scaffold" -version = "1.52.0" +version = "1.53.0-rc.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml index fba0d03cd5..c03290d5dd 100644 --- a/apollo-federation/Cargo.toml +++ b/apollo-federation/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-federation" -version = "1.52.0" +version = "1.53.0-rc.0" authors = ["The Apollo GraphQL Contributors"] edition = "2021" description = "Apollo Federation" diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index cc1e73902a..cbe2cf5a06 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.52.0" +version = "1.53.0-rc.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 7bd39a628b..270a69c5c3 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.52.0" +version = "1.53.0-rc.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.template.toml b/apollo-router-scaffold/templates/base/Cargo.template.toml index 21f679e602..7a70c7e031 100644 --- a/apollo-router-scaffold/templates/base/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/Cargo.template.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.52.0" +apollo-router = "1.53.0-rc.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml index 5194c11c10..49b67b124a 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.52.0" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.53.0-rc.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 86f032faa5..6837ffe9a4 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.52.0" +version = "1.53.0-rc.0" authors = ["Apollo Graph, Inc. 
"] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -68,7 +68,7 @@ askama = "0.12.1" access-json = "0.1.0" anyhow = "1.0.86" apollo-compiler.workspace = true -apollo-federation = { path = "../apollo-federation", version = "=1.52.0" } +apollo-federation = { path = "../apollo-federation", version = "=1.53.0-rc.0" } arc-swap = "1.6.0" async-channel = "1.9.0" async-compression = { version = "0.4.6", features = [ diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 68cb7cdbbd..cb42c32e3e 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.52.0 + image: ghcr.io/apollographql/router:v1.53.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 254b9e98f8..44fdc1d490 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.52.0 + image: ghcr.io/apollographql/router:v1.53.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index b94e872653..f4520a9e7f 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.52.0 + image: 
ghcr.io/apollographql/router:v1.53.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 02d1f131bd..386ba56186 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.52.0 +version: 1.53.0-rc.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.52.0" +appVersion: "v1.53.0-rc.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index a1940b4b28..284ceec2e5 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.52.0](https://img.shields.io/badge/Version-1.52.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.52.0](https://img.shields.io/badge/AppVersion-v1.52.0-informational?style=flat-square) +![Version: 1.53.0-rc.0](https://img.shields.io/badge/Version-1.53.0--rc.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.53.0-rc.0](https://img.shields.io/badge/AppVersion-v1.53.0--rc.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull 
oci://ghcr.io/apollographql/helm-charts/router --version 1.52.0 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.52.0 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.52.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -79,6 +79,7 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | probes.readiness | object | `{"initialDelaySeconds":0}` | Configure readiness probe | | replicaCount | int | `1` | | | resources | object | `{}` | | +| rollingUpdate | object | `{}` | Sets the [rolling update strategy parameters](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment). Can take absolute values or % values. 
| | router | object | `{"args":["--hot-reload"],"configuration":{"health_check":{"listen":"0.0.0.0:8088"},"supergraph":{"listen":"0.0.0.0:4000"}}}` | See https://www.apollographql.com/docs/router/configuration/overview/#yaml-config-file for yaml structure | | securityContext | object | `{}` | | | service.annotations | object | `{}` | | @@ -95,3 +96,5 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | topologySpreadConstraints | list | `[]` | Sets the [topology spread constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for Deployment pods | | virtualservice.enabled | bool | `false` | | +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) diff --git a/licenses.html b/licenses.html index f4c1083d5b..5c67294e08 100644 --- a/licenses.html +++ b/licenses.html @@ -44,11 +44,11 @@

Third Party Licenses

Overview of licenses:

    -
  • Apache License 2.0 (490)
  • -
  • MIT License (165)
  • +
  • Apache License 2.0 (468)
  • +
  • MIT License (163)
  • BSD 3-Clause "New" or "Revised" License (11)
  • +
  • Elastic License 2.0 (10)
  • ISC License (8)
  • -
  • Elastic License 2.0 (6)
  • BSD 2-Clause "Simplified" License (5)
  • Mozilla Public License 2.0 (5)
  • Creative Commons Zero v1.0 Universal (2)
  • @@ -3576,230 +3576,6 @@

    Used by:

    Copyright 2017 Juniper Networks, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -
  • -

    Apache License 2.0

    -

    Used by:

    - -
                                     Apache License
    -                           Version 2.0, January 2004
    -                        http://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    -
    -      "License" shall mean the terms and conditions for use, reproduction,
    -      and distribution as defined by Sections 1 through 9 of this document.
    -
    -      "Licensor" shall mean the copyright owner or entity authorized by
    -      the copyright owner that is granting the License.
    -
    -      "Legal Entity" shall mean the union of the acting entity and all
    -      other entities that control, are controlled by, or are under common
    -      control with that entity. For the purposes of this definition,
    -      "control" means (i) the power, direct or indirect, to cause the
    -      direction or management of such entity, whether by contract or
    -      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -      outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -      "You" (or "Your") shall mean an individual or Legal Entity
    -      exercising permissions granted by this License.
    -
    -      "Source" form shall mean the preferred form for making modifications,
    -      including but not limited to software source code, documentation
    -      source, and configuration files.
    -
    -      "Object" form shall mean any form resulting from mechanical
    -      transformation or translation of a Source form, including but
    -      not limited to compiled object code, generated documentation,
    -      and conversions to other media types.
    -
    -      "Work" shall mean the work of authorship, whether in Source or
    -      Object form, made available under the License, as indicated by a
    -      copyright notice that is included in or attached to the work
    -      (an example is provided in the Appendix below).
    -
    -      "Derivative Works" shall mean any work, whether in Source or Object
    -      form, that is based on (or derived from) the Work and for which the
    -      editorial revisions, annotations, elaborations, or other modifications
    -      represent, as a whole, an original work of authorship. For the purposes
    -      of this License, Derivative Works shall not include works that remain
    -      separable from, or merely link (or bind by name) to the interfaces of,
    -      the Work and Derivative Works thereof.
    -
    -      "Contribution" shall mean any work of authorship, including
    -      the original version of the Work and any modifications or additions
    -      to that Work or Derivative Works thereof, that is intentionally
    -      submitted to Licensor for inclusion in the Work by the copyright owner
    -      or by an individual or Legal Entity authorized to submit on behalf of
    -      the copyright owner. For the purposes of this definition, "submitted"
    -      means any form of electronic, verbal, or written communication sent
    -      to the Licensor or its representatives, including but not limited to
    -      communication on electronic mailing lists, source code control systems,
    -      and issue tracking systems that are managed by, or on behalf of, the
    -      Licensor for the purpose of discussing and improving the Work, but
    -      excluding communication that is conspicuously marked or otherwise
    -      designated in writing by the copyright owner as "Not a Contribution."
    -
    -      "Contributor" shall mean Licensor and any individual or Legal Entity
    -      on behalf of whom a Contribution has been received by Licensor and
    -      subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      copyright license to reproduce, prepare Derivative Works of,
    -      publicly display, publicly perform, sublicense, and distribute the
    -      Work and such Derivative Works in Source or Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      (except as stated in this section) patent license to make, have made,
    -      use, offer to sell, sell, import, and otherwise transfer the Work,
    -      where such license applies only to those patent claims licensable
    -      by such Contributor that are necessarily infringed by their
    -      Contribution(s) alone or by combination of their Contribution(s)
    -      with the Work to which such Contribution(s) was submitted. If You
    -      institute patent litigation against any entity (including a
    -      cross-claim or counterclaim in a lawsuit) alleging that the Work
    -      or a Contribution incorporated within the Work constitutes direct
    -      or contributory patent infringement, then any patent licenses
    -      granted to You under this License for that Work shall terminate
    -      as of the date such litigation is filed.
    -
    -   4. Redistribution. You may reproduce and distribute copies of the
    -      Work or Derivative Works thereof in any medium, with or without
    -      modifications, and in Source or Object form, provided that You
    -      meet the following conditions:
    -
    -      (a) You must give any other recipients of the Work or
    -          Derivative Works a copy of this License; and
    -
    -      (b) You must cause any modified files to carry prominent notices
    -          stating that You changed the files; and
    -
    -      (c) You must retain, in the Source form of any Derivative Works
    -          that You distribute, all copyright, patent, trademark, and
    -          attribution notices from the Source form of the Work,
    -          excluding those notices that do not pertain to any part of
    -          the Derivative Works; and
    -
    -      (d) If the Work includes a "NOTICE" text file as part of its
    -          distribution, then any Derivative Works that You distribute must
    -          include a readable copy of the attribution notices contained
    -          within such NOTICE file, excluding those notices that do not
    -          pertain to any part of the Derivative Works, in at least one
    -          of the following places: within a NOTICE text file distributed
    -          as part of the Derivative Works; within the Source form or
    -          documentation, if provided along with the Derivative Works; or,
    -          within a display generated by the Derivative Works, if and
    -          wherever such third-party notices normally appear. The contents
    -          of the NOTICE file are for informational purposes only and
    -          do not modify the License. You may add Your own attribution
    -          notices within Derivative Works that You distribute, alongside
    -          or as an addendum to the NOTICE text from the Work, provided
    -          that such additional attribution notices cannot be construed
    -          as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and
    -      may provide additional or different license terms and conditions
    -      for use, reproduction, or distribution of Your modifications, or
    -      for any such Derivative Works as a whole, provided Your use,
    -      reproduction, and distribution of the Work otherwise complies with
    -      the conditions stated in this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise,
    -      any Contribution intentionally submitted for inclusion in the Work
    -      by You to the Licensor shall be under the terms and conditions of
    -      this License, without any additional terms or conditions.
    -      Notwithstanding the above, nothing herein shall supersede or modify
    -      the terms of any separate license agreement you may have executed
    -      with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -      names, trademarks, service marks, or product names of the Licensor,
    -      except as required for reasonable and customary use in describing the
    -      origin of the Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or
    -      agreed to in writing, Licensor provides the Work (and each
    -      Contributor provides its Contributions) on an "AS IS" BASIS,
    -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -      implied, including, without limitation, any warranties or conditions
    -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -      PARTICULAR PURPOSE. You are solely responsible for determining the
    -      appropriateness of using or redistributing the Work and assume any
    -      risks associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory,
    -      whether in tort (including negligence), contract, or otherwise,
    -      unless required by applicable law (such as deliberate and grossly
    -      negligent acts) or agreed to in writing, shall any Contributor be
    -      liable to You for damages, including any direct, indirect, special,
    -      incidental, or consequential damages of any character arising as a
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    -
    -   END OF TERMS AND CONDITIONS
    -
    -   APPENDIX: How to apply the Apache License to your work.
    -
    -      To apply the Apache License to your work, attach the following
    -      boilerplate notice, with the fields enclosed by brackets "{}"
    -      replaced with your own identifying information. (Don't include
    -      the brackets!)  The text should be enclosed in the appropriate
    -      comment syntax for the file format. We also recommend that a
    -      file or class name and description of purpose be included on the
    -      same "printed page" as the copyright notice for easier
    -      identification within third-party archives.
    -
    -   Copyright 2017-NOW Actix Team
    -
        Licensed under the Apache License, Version 2.0 (the "License");
        you may not use this file except in compliance with the License.
        You may obtain a copy of the License at
    @@ -6735,7 +6511,6 @@ 

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -8660,7 +8435,6 @@ 

    Used by:

  • derive_arbitrary
  • displaydoc
  • either
  • -
  • env_logger
  • envmnt
  • equivalent
  • error-chain
  • @@ -8684,7 +8458,6 @@

    Used by:

  • hdrhistogram
  • heck
  • heck
  • -
  • hermit-abi
  • hermit-abi
  • httparse
  • humantime-serde
  • @@ -11516,10 +11289,8 @@

    Apache License 2.0

    Used by:

    ../../LICENSE-APACHE
  • @@ -12170,7 +11941,7 @@

    Used by:

    Apache License 2.0

    Used by:

    Copyright 2021 Apollo Graph, Inc.
     
    @@ -13680,34 +13454,6 @@ 

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -
    - -
  • -

    MIT License

    -

    Used by:

    - -
    Copyright (c) 2015-2019 Doug Tangren
    -
    -Permission is hereby granted, free of charge, to any person obtaining
    -a copy of this software and associated documentation files (the
    -"Software"), to deal in the Software without restriction, including
    -without limitation the rights to use, copy, modify, merge, publish,
    -distribute, sublicense, and/or sell copies of the Software, and to
    -permit persons to whom the Software is furnished to do so, subject to
    -the following conditions:
    -
    -The above copyright notice and this permission notice shall be
    -included in all copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     
  • @@ -15596,7 +15342,6 @@

    Used by:

    MIT License

    Used by:

    The MIT License (MIT)
    diff --git a/scripts/install.sh b/scripts/install.sh
    index de9ed49dae..df887e5d44 100755
    --- a/scripts/install.sh
    +++ b/scripts/install.sh
    @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa
     
     # Router version defined in apollo-router's Cargo.toml
     # Note: Change this line manually during the release steps.
    -PACKAGE_VERSION="v1.52.0"
    +PACKAGE_VERSION="v1.53.0-rc.0"
     
     download_binary() {
         downloader --check
    
    From dfb674ca55ed38c39b05e5c27e2424e948789121 Mon Sep 17 00:00:00 2001
    From: Coenen Benjamin 
    Date: Mon, 26 Aug 2024 09:46:31 +0200
    Subject: [PATCH 095/108] Apply suggestions from code review
    
    Co-authored-by: Edward Huang 
    ---
     .changesets/feat_bnjjj_feat_417.md            | 21 +++++++++----------
     .../feat_candle_exhale_deodorant_weeds.md     | 10 +++++----
     .changesets/fix_bnjjj_fix_5702.md             |  4 ++--
     ...bnjjj_fix_fatal_error_subgraph_response.md |  5 ++++-
     ...x_bnjjj_fix_subgraph_selector_for_event.md |  5 ++++-
     ...fix_bnjjj_fix_supergraph_query_selector.md |  6 ++++--
     .../fix_bnjjj_improve_gt_lt_conditions.md     |  6 ++++--
     ...int_bnjjj_improve_perf_custom_telemetry.md |  4 ++--
     8 files changed, 36 insertions(+), 25 deletions(-)
    
    diff --git a/.changesets/feat_bnjjj_feat_417.md b/.changesets/feat_bnjjj_feat_417.md
    index d4aa827ccd..7c4dc0aba5 100644
    --- a/.changesets/feat_bnjjj_feat_417.md
    +++ b/.changesets/feat_bnjjj_feat_417.md
    @@ -1,18 +1,17 @@
    -### Add support of other format for trace id in telemetry ([PR #5735](https://github.com/apollographql/router/pull/5735))
    +### Support new telemetry trace ID format ([PR #5735](https://github.com/apollographql/router/pull/5735))
     
    -Currently we support datadog and otel traceID formats and decimal. However we would like to also support UUID.
    +The router supports a new UUID format for telemetry trace IDs.
     
    -Unify the two `TraceIdFormat` enums into a single enum that us used across selectors and experimental_expose_trace id.
     
    -Ensure the following formats are supported:
    +The following formats are supported in router configuration for trace IDs:
     
    -+ open_telemetry
    -+ hexadecimal  (same as opentelemetry)
    -+ decimal
    -+ datadog
    -+ uuid (this has dashes)
    +* `open_telemetry`
    +* `hexadecimal`  (same as `opentelemetry`)
    +* `decimal`
    +* `datadog`
    +* `uuid` (may contain dashes)
     
    -Add support for logging to output using `TraceIdFormat`
    +You can configure router logging to display the formatted trace ID with `display_trace_id`:
     
     ```yaml
      telemetry:
    @@ -21,7 +20,7 @@ Add support for logging to output using `TraceIdFormat`
           stdout:
             format:
               json:
    -            disaplay_trace_id: (true|false|open_telemetry|hexadecimal|decimal|datadog|uuid)
    +            display_trace_id: (true|false|open_telemetry|hexadecimal|decimal|datadog|uuid)
     ```
     
     By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5735
    \ No newline at end of file
    diff --git a/.changesets/feat_candle_exhale_deodorant_weeds.md b/.changesets/feat_candle_exhale_deodorant_weeds.md
    index ed3f311569..4e86c7c6f7 100644
    --- a/.changesets/feat_candle_exhale_deodorant_weeds.md
    +++ b/.changesets/feat_candle_exhale_deodorant_weeds.md
    @@ -1,6 +1,9 @@
    -### Add warnings for invalid configuration on custom telemetry ([PR #5759](https://github.com/apollographql/router/issues/5759))
    +### Add warnings for invalid configuration of custom telemetry ([PR #5759](https://github.com/apollographql/router/issues/5759))
     
    -For example sometimes if you have configuration like this:
    +The router now logs warnings when running with telemetry that may have invalid custom configurations.
    + 
    +
    +For example, you may customize telemetry using invalid conditions or inaccessible statuses:
     
     ```yaml
     telemetry:
    @@ -20,7 +23,6 @@ telemetry:
                 - product
     ```
     
    -This configuration is syntaxically correct but wouldn't probably do what you would like to. I put comments to highlight 2 mistakes in this example.
    -Before it was silently computed, now you'll get warning when starting the router.
    +Although the configuration is syntactically correct, its customization is invalid, and the router now outputs warnings for such invalid configurations.
     
     By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759
    \ No newline at end of file
    diff --git a/.changesets/fix_bnjjj_fix_5702.md b/.changesets/fix_bnjjj_fix_5702.md
    index 14e662e6bb..d364b78f27 100644
    --- a/.changesets/fix_bnjjj_fix_5702.md
    +++ b/.changesets/fix_bnjjj_fix_5702.md
    @@ -1,6 +1,6 @@
    -### Improve support of conditions at the request level, especially for events ([Issue #5702](https://github.com/apollographql/router/issues/5702))
    +### Fix `exists` condition for custom telemetry events ([Issue #5702](https://github.com/apollographql/router/issues/5702))
     
    -`exists` condition is now properly handled with events, this configuration will now work:
    +The router now properly handles the `exists` condition for events. The following configuration now works as intended:
     
     ```yaml
     telemetry:
    diff --git a/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md b/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md
    index 9e09db2866..b36b9ab32b 100644
    --- a/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md
    +++ b/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md
    @@ -1,5 +1,8 @@
     ### fix(subgraph_service): when the subgraph connection is closed or in error, return a proper subgraph response ([PR #5859](https://github.com/apollographql/router/pull/5859))
     
    -When the subgraph connection is closed or in error, return a proper subgraph response containing an error. This was preventing subgraph response service to be triggered in coprocessor and rhai.
    +
    +The router now returns a proper subgraph response, with an error if necessary, when a subgraph connection is closed or returns an error. 
    + 
    +Previously, this issue prevented the subgraph response service from being triggered in coprocessors or Rhai scripts.
     
     By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5859
    \ No newline at end of file
    diff --git a/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md b/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md
    index db47321d20..9894b91622 100644
    --- a/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md
    +++ b/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md
    @@ -1,6 +1,9 @@
     ### Evaluate selectors in response stage when possible ([PR #5725](https://github.com/apollographql/router/pull/5725))
     
    -As `events` are triggered at a specific event (`request`|`response`|`error`) we can only have condition for the related event, but sometimes selectors that can be applied at several events (like `subgraph_name` to get the subgraph name). Adds support for various supergraph selectors on response events.
    +
    +The router now supports having various supergraph selectors on response events.
    +
+Because `events` are triggered at a specific event (`request`|`response`|`error`), you usually have only one condition for a related event. You can, however, have selectors that can be applied to several events, like `subgraph_name` to get the subgraph name.
     
     Example of an event to log the raw subgraph response only on a subgraph named `products`, this was not working before.
     
    diff --git a/.changesets/fix_bnjjj_fix_supergraph_query_selector.md b/.changesets/fix_bnjjj_fix_supergraph_query_selector.md
    index 0914846045..4546589f00 100644
    --- a/.changesets/fix_bnjjj_fix_supergraph_query_selector.md
    +++ b/.changesets/fix_bnjjj_fix_supergraph_query_selector.md
    @@ -1,6 +1,8 @@
    -### Execute supergraph query selector also on events ([PR #5764](https://github.com/apollographql/router/pull/5764))
    +### Support supergraph query selector for events ([PR #5764](https://github.com/apollographql/router/pull/5764))
     
    -The `query: root_fields` selector works on `response` stage for events right now but it should also work on `event_response`. This configuration is now working:
    +The router now supports the `query: root_fields` selector for `event_response`. Previously the selector worked for `response` stage events but didn't work for `event_response`. 
    +
    +The following configuration for a `query: root_fields` on an `event_response` now works:
     
     ```yaml
     telemetry:
    diff --git a/.changesets/fix_bnjjj_improve_gt_lt_conditions.md b/.changesets/fix_bnjjj_improve_gt_lt_conditions.md
    index bd7f0fc99e..8923ee459b 100644
    --- a/.changesets/fix_bnjjj_improve_gt_lt_conditions.md
    +++ b/.changesets/fix_bnjjj_improve_gt_lt_conditions.md
    @@ -1,6 +1,8 @@
    -### Add the ability for `gt`/`lt` conditions to parse the string selector to number ([PR #5758](https://github.com/apollographql/router/pull/5758))
    +### Support `gt`/`lt` conditions for parsing string selectors to numbers ([PR #5758](https://github.com/apollographql/router/pull/5758))
     
    -This will enable the ability to have gt/lt conditions on header selectors for example, if you want to put a specific attribute on a span if the `content-length` header is greater than 100:
    +The router now supports greater than (`gt`) and less than (`lt`) conditions for header selectors.
    + 
    +The following example applies an attribute on a span if the `content-length` header is greater than 100:
     
     ```yaml
     telemetry:
    diff --git a/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md b/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md
    index cc27c908d5..722e5ad6ed 100644
    --- a/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md
    +++ b/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md
    @@ -1,5 +1,5 @@
    -### Improve performance, don't re-create meter and instruments on every calls in Telemetry ([PR #5629](https://github.com/apollographql/router/pull/5629))
    +### Improve performance by optimizing telemetry meter and instrument creation ([PR #5629](https://github.com/apollographql/router/pull/5629))
     
    -The creation of otel instruments using a regex is no longer part of the hot path. Now we create these instruments when starting the telemetry plugin and not in every serives.
+The router's performance has been improved by moving telemetry instrument creation out of the critical path: instruments are now created once when the telemetry plugin starts, rather than in every service.
     
     By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5629
    \ No newline at end of file
    
    From 95c20ef79e48674aa5a0266619cac1dcd41c73a7 Mon Sep 17 00:00:00 2001
    From: Jesse Rosenberger 
    Date: Mon, 26 Aug 2024 16:44:34 +0300
    Subject: [PATCH 096/108] Update .changesets/feat_helm_rollingupdate.md
    
    Co-authored-by: Edward Huang 
    ---
     .changesets/feat_helm_rollingupdate.md | 5 ++---
     1 file changed, 2 insertions(+), 3 deletions(-)
    
    diff --git a/.changesets/feat_helm_rollingupdate.md b/.changesets/feat_helm_rollingupdate.md
    index b59d016fb0..84094eb0a9 100644
    --- a/.changesets/feat_helm_rollingupdate.md
    +++ b/.changesets/feat_helm_rollingupdate.md
    @@ -1,6 +1,5 @@
    -### Support providing RollingUpdate maxSurge and maxUnavailable values ([Issue #5664](https://github.com/apollographql/router/issues/5664))
    +### Helm: Support `maxSurge` and `maxUnavailable` for rolling updates ([Issue #5664](https://github.com/apollographql/router/issues/5664))
     
    -RollingUpdate maxSurge and maxUnavailable are commonly used deployment configuration values.  This change makes their
    -values able to be set via the router helm chart. 
    +The router Helm chart now supports the configuration of `maxSurge` and `maxUnavailable` for the `RollingUpdate` deployment strategy.  
     
     By [Jon Christiansen](https://github.com/theJC) in https://github.com/apollographql/router/pull/5665
    
    From 892181fd87f7392bab92724bc3444cbb7d7c2bda Mon Sep 17 00:00:00 2001
    From: Taylor Ninesling 
    Date: Tue, 27 Aug 2024 10:58:51 -0700
    Subject: [PATCH 097/108] Use router-bridge beta.1 (#5887)
    
    Co-authored-by: Simon Sapin 
    ---
     Cargo.lock                                    |  6 +-
     apollo-router/Cargo.toml                      |  2 +-
     .../cost_calculator/static_cost.rs            | 68 ++++++++++++++++---
     ...dden_field_yields_expected_query_plan.snap |  2 +-
     ...dden_field_yields_expected_query_plan.snap |  4 +-
     ...y_plan__tests__it_expose_query_plan-2.snap |  8 +--
     ...ery_plan__tests__it_expose_query_plan.snap |  8 +--
     ...ridge_query_planner__tests__plan_root.snap |  2 +-
     apollo-router/tests/integration/redis.rs      | 12 ++--
     ...tegration__redis__query_planner_cache.snap |  2 +-
     .../snapshots/set_context__set_context.snap   |  4 +-
     ...__set_context_dependent_fetch_failure.snap |  4 +-
     .../set_context__set_context_list.snap        |  4 +-
     ...et_context__set_context_list_of_lists.snap |  4 +-
     ...set_context__set_context_no_typenames.snap |  4 +-
     ...et_context__set_context_type_mismatch.snap |  4 +-
     .../set_context__set_context_union.snap       |  6 +-
     ...__set_context_unrelated_fetch_failure.snap |  6 +-
     .../set_context__set_context_with_null.snap   |  4 +-
     ..._conditions__type_conditions_disabled.snap |  4 +-
     ...e_conditions__type_conditions_enabled.snap |  6 +-
     ...ions_enabled_generate_query_fragments.snap |  6 +-
     ..._type_conditions_enabled_list_of_list.snap |  6 +-
     ...nditions_enabled_list_of_list_of_list.snap |  6 +-
     ...s_enabled_shouldnt_make_article_fetch.snap |  6 +-
     25 files changed, 120 insertions(+), 68 deletions(-)
    
    diff --git a/Cargo.lock b/Cargo.lock
    index 568d9ee074..a7666c79e1 100644
    --- a/Cargo.lock
    +++ b/Cargo.lock
    @@ -335,7 +335,7 @@ dependencies = [
      "reqwest",
      "rhai",
      "rmp",
    - "router-bridge 0.6.0-beta.0+v2.9.0-beta.0",
    + "router-bridge 0.6.0-beta.1+v2.9.0-beta.0",
      "rowan",
      "rstack",
      "rust-embed",
    @@ -5805,9 +5805,9 @@ dependencies = [
     
     [[package]]
     name = "router-bridge"
    -version = "0.6.0-beta.0+v2.9.0-beta.0"
    +version = "0.6.0-beta.1+v2.9.0-beta.0"
     source = "registry+https://github.com/rust-lang/crates.io-index"
    -checksum = "4c862e7d7dd07e68e6b8f6bf110a0169ed6cc9709a4399df59329db135737f8d"
    +checksum = "349dcc3134916c7888f2ebbb5c66fefa2693a1f9ff522e04672abb895b66cb9b"
     dependencies = [
      "anyhow",
      "async-channel 1.9.0",
    diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
    index 6837ffe9a4..5906a76049 100644
    --- a/apollo-router/Cargo.toml
    +++ b/apollo-router/Cargo.toml
    @@ -198,7 +198,7 @@ regex = "1.10.5"
     reqwest.workspace = true
     
     # note: this dependency should _always_ be pinned, prefix the version with an `=`
    -router-bridge = "=0.6.0-beta.0+v2.9.0-beta.0"
    +router-bridge = "=0.6.0-beta.1+v2.9.0-beta.0"
     
     rust-embed = { version = "8.4.0", features = ["include-exclude"] }
     rustls = "0.21.12"
    diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
    index 4f2e585db3..439d09558f 100644
    --- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
    +++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
    @@ -567,12 +567,17 @@ mod tests {
         use apollo_federation::query_plan::query_planner::QueryPlanner;
         use bytes::Bytes;
         use test_log::test;
    +    use tower::Service;
     
         use super::*;
    +    use crate::query_planner::BridgeQueryPlanner;
         use crate::services::layers::query_analysis::ParsedDocument;
    +    use crate::services::QueryPlannerContent;
    +    use crate::services::QueryPlannerRequest;
         use crate::spec;
         use crate::spec::Query;
         use crate::Configuration;
    +    use crate::Context;
     
         impl StaticCostCalculator {
             fn rust_planned(
    @@ -625,7 +630,47 @@ mod tests {
                 .unwrap()
         }
     
    -    async fn planned_cost(schema_str: &str, query_str: &str) -> f64 {
    +    async fn planned_cost_js(schema_str: &str, query_str: &str) -> f64 {
    +        let config: Arc = Arc::new(Default::default());
    +        let (schema, query) = parse_schema_and_operation(schema_str, query_str, &config);
    +        let supergraph_schema = schema.supergraph_schema().clone();
    +
    +        let mut planner = BridgeQueryPlanner::new(schema.into(), config.clone(), None, None)
    +            .await
    +            .unwrap();
    +
    +        let ctx = Context::new();
    +        ctx.extensions()
    +            .with_lock(|mut lock| lock.insert::(query));
    +
    +        let planner_res = planner
    +            .call(QueryPlannerRequest::new(query_str.to_string(), None, ctx))
    +            .await
    +            .unwrap();
    +        let query_plan = match planner_res.content.unwrap() {
    +            QueryPlannerContent::Plan { plan } => plan,
    +            _ => panic!("Query planner returned unexpected non-plan content"),
    +        };
    +
    +        let schema = DemandControlledSchema::new(Arc::new(supergraph_schema)).unwrap();
    +        let mut demand_controlled_subgraph_schemas = HashMap::new();
    +        for (subgraph_name, subgraph_schema) in planner.subgraph_schemas().iter() {
    +            let demand_controlled_subgraph_schema =
    +                DemandControlledSchema::new(subgraph_schema.clone()).unwrap();
    +            demand_controlled_subgraph_schemas
    +                .insert(subgraph_name.to_string(), demand_controlled_subgraph_schema);
    +        }
    +
    +        let calculator = StaticCostCalculator::new(
    +            Arc::new(schema),
    +            Arc::new(demand_controlled_subgraph_schemas),
    +            100,
    +        );
    +
    +        calculator.planned(&query_plan).unwrap()
    +    }
    +
    +    fn planned_cost_rust(schema_str: &str, query_str: &str) -> f64 {
             let config: Arc = Arc::new(Default::default());
             let (schema, query) = parse_schema_and_operation(schema_str, query_str, &config);
     
    @@ -798,7 +843,8 @@ mod tests {
             let response = include_bytes!("./fixtures/federated_ships_required_response.json");
     
             assert_eq!(estimated_cost(schema, query), 10200.0);
    -        assert_eq!(planned_cost(schema, query).await, 10400.0);
    +        assert_eq!(planned_cost_js(schema, query).await, 10400.0);
    +        assert_eq!(planned_cost_rust(schema, query), 10400.0);
             assert_eq!(actual_cost(schema, query, response), 2.0);
         }
     
    @@ -809,7 +855,8 @@ mod tests {
             let response = include_bytes!("./fixtures/federated_ships_fragment_response.json");
     
             assert_eq!(estimated_cost(schema, query), 300.0);
    -        assert_eq!(planned_cost(schema, query).await, 400.0);
    +        assert_eq!(planned_cost_js(schema, query).await, 400.0);
    +        assert_eq!(planned_cost_rust(schema, query), 400.0);
             assert_eq!(actual_cost(schema, query, response), 6.0);
         }
     
    @@ -820,7 +867,8 @@ mod tests {
             let response = include_bytes!("./fixtures/federated_ships_fragment_response.json");
     
             assert_eq!(estimated_cost(schema, query), 300.0);
    -        assert_eq!(planned_cost(schema, query).await, 400.0);
    +        assert_eq!(planned_cost_js(schema, query).await, 400.0);
    +        assert_eq!(planned_cost_rust(schema, query), 400.0);
             assert_eq!(actual_cost(schema, query, response), 6.0);
         }
     
    @@ -831,7 +879,8 @@ mod tests {
             let response = include_bytes!("./fixtures/federated_ships_deferred_response.json");
     
             assert_eq!(estimated_cost(schema, query), 10200.0);
    -        assert_eq!(planned_cost(schema, query).await, 10400.0);
    +        assert_eq!(planned_cost_js(schema, query).await, 10400.0);
    +        assert_eq!(planned_cost_rust(schema, query), 10400.0);
             assert_eq!(actual_cost(schema, query, response), 2.0);
         }
     
    @@ -865,7 +914,8 @@ mod tests {
             let response = include_bytes!("./fixtures/custom_cost_response.json");
     
             assert_eq!(estimated_cost(schema, query), 127.0);
    -        assert_eq!(planned_cost(schema, query).await, 127.0);
    +        assert_eq!(planned_cost_js(schema, query).await, 127.0);
    +        assert_eq!(planned_cost_rust(schema, query), 127.0);
             assert_eq!(actual_cost(schema, query, response), 125.0);
         }
     
    @@ -876,7 +926,8 @@ mod tests {
             let response = include_bytes!("./fixtures/custom_cost_response.json");
     
             assert_eq!(estimated_cost(schema, query), 127.0);
    -        assert_eq!(planned_cost(schema, query).await, 127.0);
    +        assert_eq!(planned_cost_js(schema, query).await, 127.0);
    +        assert_eq!(planned_cost_rust(schema, query), 127.0);
             assert_eq!(actual_cost(schema, query, response), 125.0);
         }
     
    @@ -888,7 +939,8 @@ mod tests {
             let response = include_bytes!("./fixtures/custom_cost_response.json");
     
             assert_eq!(estimated_cost(schema, query), 132.0);
    -        assert_eq!(planned_cost(schema, query).await, 132.0);
    +        assert_eq!(planned_cost_js(schema, query).await, 132.0);
    +        assert_eq!(planned_cost_rust(schema, query), 132.0);
             assert_eq!(actual_cost(schema, query, response), 125.0);
         }
     }
    diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap
    index cb657dcdce..01cca77a5b 100644
    --- a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap
    +++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap
    @@ -19,7 +19,7 @@ expression: query_plan
               "inputRewrites": null,
               "outputRewrites": null,
               "contextRewrites": null,
    -          "schemaAwareHash": "12dda6193654ae4fe6e38bc09d4f81cc73d0c9e098692096f72d2158eef4776f",
    +          "schemaAwareHash": "23605b350473485e40bc8b1245f0c5c226a2997a96291bf3ad3412570a5172bb",
               "authorization": {
                 "is_authenticated": false,
                 "scopes": [],
    diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap
    index d18a3e2b11..455898049f 100644
    --- a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap
    +++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap
    @@ -24,7 +24,7 @@ expression: query_plan
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "00ad582ea45fc1bce436b36b21512f3d2c47b74fdbdc61e4b349289722c9ecf2",
    +              "schemaAwareHash": "d14f50b039a3b961385f4d2a878c5800dd01141cddd3f8f1874a5499bbe397a9",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -63,7 +63,7 @@ expression: query_plan
                     "inputRewrites": null,
                     "outputRewrites": null,
                     "contextRewrites": null,
    -                "schemaAwareHash": "a8ebdc2151a2e5207882e43c6906c0c64167fd9a8e0c7c4becc47736a5105096",
    +                "schemaAwareHash": "caa182daf66e4ffe9b1af8c386092ba830887bbae0d58395066fa480525080ec",
                     "authorization": {
                       "is_authenticated": false,
                       "scopes": [],
    diff --git a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap
    index 0d6ab611f6..e914049664 100644
    --- a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap
    +++ b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap
    @@ -69,7 +69,7 @@ expression: "serde_json::to_value(response).unwrap()"
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "7245d488e97c3b2ac9f5fa4dd4660940b94ad81af070013305b2c0f76337b2f9",
    +              "schemaAwareHash": "39cac6386a951cd4dbdfc9c91d7d24cc1061481ab03b72c483422446e09cba32",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -109,7 +109,7 @@ expression: "serde_json::to_value(response).unwrap()"
                     "inputRewrites": null,
                     "outputRewrites": null,
                     "contextRewrites": null,
    -                "schemaAwareHash": "6e0b4156706ea0cf924500cfdc99dd44b9f0ed07e2d3f888d4aff156e6a33238",
    +                "schemaAwareHash": "ee6ac550117eed7d8fcaf66c83fd5177bf03a9d5761f484e2664ea4e66149127",
                     "authorization": {
                       "is_authenticated": false,
                       "scopes": [],
    @@ -156,7 +156,7 @@ expression: "serde_json::to_value(response).unwrap()"
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "ff649f3d70241d5a8cd5f5d03ff4c41ecff72b0e4129a480207b05ac92318042",
    +                    "schemaAwareHash": "76d400fc6a494cbe05a44751923e570ee31928f0fb035ea36c14d4d6f4545482",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    @@ -200,7 +200,7 @@ expression: "serde_json::to_value(response).unwrap()"
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "bf9f3beda78a7a565e47c862157bad4ec871d724d752218da1168455dddca074",
    +                    "schemaAwareHash": "66c61f60e730b77cd0a58908fee01dc7a0742c47e9f847037e01297d37918821",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    diff --git a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap
    index 0d6ab611f6..e914049664 100644
    --- a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap
    +++ b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap
    @@ -69,7 +69,7 @@ expression: "serde_json::to_value(response).unwrap()"
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "7245d488e97c3b2ac9f5fa4dd4660940b94ad81af070013305b2c0f76337b2f9",
    +              "schemaAwareHash": "39cac6386a951cd4dbdfc9c91d7d24cc1061481ab03b72c483422446e09cba32",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -109,7 +109,7 @@ expression: "serde_json::to_value(response).unwrap()"
                     "inputRewrites": null,
                     "outputRewrites": null,
                     "contextRewrites": null,
    -                "schemaAwareHash": "6e0b4156706ea0cf924500cfdc99dd44b9f0ed07e2d3f888d4aff156e6a33238",
    +                "schemaAwareHash": "ee6ac550117eed7d8fcaf66c83fd5177bf03a9d5761f484e2664ea4e66149127",
                     "authorization": {
                       "is_authenticated": false,
                       "scopes": [],
    @@ -156,7 +156,7 @@ expression: "serde_json::to_value(response).unwrap()"
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "ff649f3d70241d5a8cd5f5d03ff4c41ecff72b0e4129a480207b05ac92318042",
    +                    "schemaAwareHash": "76d400fc6a494cbe05a44751923e570ee31928f0fb035ea36c14d4d6f4545482",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    @@ -200,7 +200,7 @@ expression: "serde_json::to_value(response).unwrap()"
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "bf9f3beda78a7a565e47c862157bad4ec871d724d752218da1168455dddca074",
    +                    "schemaAwareHash": "66c61f60e730b77cd0a58908fee01dc7a0742c47e9f847037e01297d37918821",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    diff --git a/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap b/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap
    index d49c351866..16ba934103 100644
    --- a/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap
    +++ b/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap
    @@ -15,7 +15,7 @@ Fetch(
             output_rewrites: None,
             context_rewrites: None,
             schema_aware_hash: QueryHash(
    -            "a4ab3ffe0fd7863aea8cd1e85d019d2c64ec0351d62f9759bed3c9dc707ea315",
    +            "5c5036eef33484e505dd5a8666fd0a802e60d830964a4dbbf662526398563ffd",
             ),
             authorization: CacheKeyMetadata {
                 is_authenticated: false,
    diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs
    index 329c2962bc..75a812d202 100644
    --- a/apollo-router/tests/integration/redis.rs
    +++ b/apollo-router/tests/integration/redis.rs
    @@ -411,13 +411,13 @@ async fn entity_cache() -> Result<(), BoxError> {
         insta::assert_json_snapshot!(response);
     
         let s:String = client
    -          .get("version:1.0:subgraph:products:type:Query:hash:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
    +          .get("version:1.0:subgraph:products:type:Query:hash:0b4d791a3403d76643db0a9e4a8d304b1cd1f8c4ab68cb58ab7ccdc116a1da1c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
               .await
               .unwrap();
         let v: Value = serde_json::from_str(&s).unwrap();
         insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap());
     
    -    let s: String = client.get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap();
    +    let s: String = client.get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap();
         let v: Value = serde_json::from_str(&s).unwrap();
         insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap());
     
    @@ -525,7 +525,7 @@ async fn entity_cache() -> Result<(), BoxError> {
         insta::assert_json_snapshot!(response);
     
         let s:String = client
    -        .get("version:1.0:subgraph:reviews:type:Product:entity:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
    +        .get("version:1.0:subgraph:reviews:type:Product:entity:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
             .await
             .unwrap();
         let v: Value = serde_json::from_str(&s).unwrap();
    @@ -746,7 +746,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> {
         insta::assert_json_snapshot!(response);
     
         let s:String = client
    -          .get("version:1.0:subgraph:products:type:Query:hash:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
    +          .get("version:1.0:subgraph:products:type:Query:hash:0b4d791a3403d76643db0a9e4a8d304b1cd1f8c4ab68cb58ab7ccdc116a1da1c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
               .await
               .unwrap();
         let v: Value = serde_json::from_str(&s).unwrap();
    @@ -767,7 +767,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> {
         );
     
         let s: String = client
    -        .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
    +        .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
             .await
             .unwrap();
         let v: Value = serde_json::from_str(&s).unwrap();
    @@ -811,7 +811,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> {
         insta::assert_json_snapshot!(response);
     
         let s:String = client
    -          .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:3b6ef3c8fd34c469d59f513942c5f4c8f91135e828712de2024e2cd4613c50ae:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
    +          .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:f7d6d3af2706afe346e3d5fd353e61bd186d2fc64cb7b3c13a62162189519b5f:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
               .await
               .unwrap();
         let v: Value = serde_json::from_str(&s).unwrap();
    diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap
    index f90305be82..d7330676f2 100644
    --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap
    +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap
    @@ -13,7 +13,7 @@ expression: query_plan
       "inputRewrites": null,
       "outputRewrites": null,
       "contextRewrites": null,
    -  "schemaAwareHash": "121b9859eba2d8fa6dde0a54b6e3781274cf69f7ffb0af912e92c01c6bfff6ca",
    +  "schemaAwareHash": "d38dcce02eea33b3834447eefedabb09d3b14f3b01ad512e881f9e65137f0565",
       "authorization": {
         "is_authenticated": false,
         "scopes": [],
    diff --git a/apollo-router/tests/snapshots/set_context__set_context.snap b/apollo-router/tests/snapshots/set_context__set_context.snap
    index 2e11680753..18bfcbfcc9 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context.snap
    @@ -34,7 +34,7 @@ expression: response
                   "operationKind": "query",
                   "operationName": "Query__Subgraph1__0",
                   "outputRewrites": null,
    -              "schemaAwareHash": "d7cb2d1809789d49360ca0a60570555f83855f00547675f366915c9d9d90fef9",
    +              "schemaAwareHash": "0163c552923b61fbde6dbcd879ffc2bb887175dc41bbf75a272875524e664e8d",
                   "serviceName": "Subgraph1",
                   "variableUsages": []
                 },
    @@ -80,7 +80,7 @@ expression: response
                         "typeCondition": "U"
                       }
                     ],
    -                "schemaAwareHash": "66b954f39aead8436321c671eb71e56ce15bbe0c7b82f06b2f8f70473ce1cb6e",
    +                "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a",
                     "serviceName": "Subgraph1",
                     "variableUsages": [
                       "contextualArgument_1_0"
    diff --git a/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap b/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap
    index 703d8f9c59..099d36a7cb 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap
    @@ -25,7 +25,7 @@ expression: response
                   "operationKind": "query",
                   "operationName": "Query_fetch_dependent_failure__Subgraph1__0",
                   "outputRewrites": null,
    -              "schemaAwareHash": "595c36c322602fefc4658fc0070973b51800c2d2debafae5571a7c9811d80745",
    +              "schemaAwareHash": "6bcaa7a2d52a416d5278eaef6be102427f328b6916075f193c87459516a7fb6d",
                   "serviceName": "Subgraph1",
                   "variableUsages": []
                 },
    @@ -71,7 +71,7 @@ expression: response
                         "typeCondition": "U"
                       }
                     ],
    -                "schemaAwareHash": "37bef7ad43bb477cdec4dfc02446bd2e11a6919dc14ab90e266af85fefde4abd",
    +                "schemaAwareHash": "0e56752501c8cbf53429c5aa2df95765ea2c7cba95db9213ce42918699232651",
                     "serviceName": "Subgraph1",
                     "variableUsages": [
                       "contextualArgument_1_0"
    diff --git a/apollo-router/tests/snapshots/set_context__set_context_list.snap b/apollo-router/tests/snapshots/set_context__set_context_list.snap
    index 095326167e..d6dd312f0a 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context_list.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context_list.snap
    @@ -40,7 +40,7 @@ expression: response
                   "operationKind": "query",
                   "operationName": "Query__Subgraph1__0",
                   "outputRewrites": null,
    -              "schemaAwareHash": "4f746b9319e3ca4f234269464b6815eb97782f2ffe36774b998e7fb78f30abef",
    +              "schemaAwareHash": "805348468cefee0e3e745cb1bcec0ab4bd44ba55f6ddb91e52e0bc9b437c2dee",
                   "serviceName": "Subgraph1",
                   "variableUsages": []
                 },
    @@ -86,7 +86,7 @@ expression: response
                         "typeCondition": "U"
                       }
                     ],
    -                "schemaAwareHash": "66b954f39aead8436321c671eb71e56ce15bbe0c7b82f06b2f8f70473ce1cb6e",
    +                "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a",
                     "serviceName": "Subgraph1",
                     "variableUsages": [
                       "contextualArgument_1_0"
    diff --git a/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap b/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap
    index e7fbee2a8b..c390c1db88 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap
    @@ -44,7 +44,7 @@ expression: response
                   "operationKind": "query",
                   "operationName": "QueryLL__Subgraph1__0",
                   "outputRewrites": null,
    -              "schemaAwareHash": "babf88ea82c1330e535966572a55b03a2934097cd1cf905303b86ae7c197ccaf",
    +              "schemaAwareHash": "53e85332dda78d566187c8886c207b81acfe3ab5ea0cafd3d71fb0b153026d80",
                   "serviceName": "Subgraph1",
                   "variableUsages": []
                 },
    @@ -90,7 +90,7 @@ expression: response
                         "typeCondition": "U"
                       }
                     ],
    -                "schemaAwareHash": "a9b24549250c12e38c398c32e9218134fab000be3b934ebc6bb38ea096343646",
    +                "schemaAwareHash": "8ed6f85b6a77c293c97171b4a98f7dd563e98a737d4c3a9f5c54911248498ec7",
                     "serviceName": "Subgraph1",
                     "variableUsages": [
                       "contextualArgument_1_0"
    diff --git a/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap b/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap
    index 8eaa5b0202..e9743a7902 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap
    @@ -32,7 +32,7 @@ expression: response
                   "operationKind": "query",
                   "operationName": "Query__Subgraph1__0",
                   "outputRewrites": null,
    -              "schemaAwareHash": "d7cb2d1809789d49360ca0a60570555f83855f00547675f366915c9d9d90fef9",
    +              "schemaAwareHash": "0163c552923b61fbde6dbcd879ffc2bb887175dc41bbf75a272875524e664e8d",
                   "serviceName": "Subgraph1",
                   "variableUsages": []
                 },
    @@ -78,7 +78,7 @@ expression: response
                         "typeCondition": "U"
                       }
                     ],
    -                "schemaAwareHash": "66b954f39aead8436321c671eb71e56ce15bbe0c7b82f06b2f8f70473ce1cb6e",
    +                "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a",
                     "serviceName": "Subgraph1",
                     "variableUsages": [
                       "contextualArgument_1_0"
    diff --git a/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap b/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap
    index 1df052723e..3208b9bf0a 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap
    @@ -32,7 +32,7 @@ expression: response
                   "operationKind": "query",
                   "operationName": "Query_type_mismatch__Subgraph1__0",
                   "outputRewrites": null,
    -              "schemaAwareHash": "7eae890e61f5ae512e112f5260abe0de3504041c92dbcc7aae0891c9bdf2222b",
    +              "schemaAwareHash": "34c8f7c0f16220c5d4b589c8da405f49510e092756fa98629c73dea06fd7c243",
                   "serviceName": "Subgraph1",
                   "variableUsages": []
                 },
    @@ -78,7 +78,7 @@ expression: response
                         "typeCondition": "U"
                       }
                     ],
    -                "schemaAwareHash": "d8ea99348ab32931371c85c09565cfb728d2e48cf017201cd79cb9ef860eb9c2",
    +                "schemaAwareHash": "feb578fd1831280f376d8961644e670dd8c3508d0a18fcf69a6de651e25e9ca8",
                     "serviceName": "Subgraph1",
                     "variableUsages": [
                       "contextualArgument_1_0"
    diff --git a/apollo-router/tests/snapshots/set_context__set_context_union.snap b/apollo-router/tests/snapshots/set_context__set_context_union.snap
    index e382988a8b..6c995c1e8b 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context_union.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context_union.snap
    @@ -31,7 +31,7 @@ expression: response
                   "operationKind": "query",
                   "operationName": "QueryUnion__Subgraph1__0",
                   "outputRewrites": null,
    -              "schemaAwareHash": "b9124cd1daa6e8347175ffe2108670a31c73cbc983e7812ee39f415235541005",
    +              "schemaAwareHash": "3e768a1879f4ced427937721980688052b471dbfee0d653b212c85f2732591cc",
                   "serviceName": "Subgraph1",
                   "variableUsages": []
                 },
    @@ -80,7 +80,7 @@ expression: response
                             "typeCondition": "V"
                           }
                         ],
    -                    "schemaAwareHash": "c50ca82d402a330c1b35a6d76332094c40b00d6dec6f6b2a9b0a32ced68f4e95",
    +                    "schemaAwareHash": "0c190d5db5b15f89fa45de844d2cec59725986e44fcb0dbdb9ab870a197cf026",
                         "serviceName": "Subgraph1",
                         "variableUsages": [
                           "contextualArgument_1_1"
    @@ -134,7 +134,7 @@ expression: response
                             "typeCondition": "V"
                           }
                         ],
    -                    "schemaAwareHash": "ec99886497fee9b4f13565e19cadb13ae85c83de93acb53f298944b7a29e630e",
    +                    "schemaAwareHash": "2d7376a8d1f7f2a929361e838bb0435ed4c4a6194fa8754af52d4b6dc7140508",
                         "serviceName": "Subgraph1",
                         "variableUsages": [
                           "contextualArgument_1_1"
    diff --git a/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap b/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap
    index 605fd4570a..49dcf6bf9b 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap
    @@ -34,7 +34,7 @@ expression: response
                   "operationKind": "query",
                   "operationName": "Query_fetch_failure__Subgraph1__0",
                   "outputRewrites": null,
    -              "schemaAwareHash": "1813ba1c272be0201096b4c4c963a07638e4f4b4ac1b97e0d90d634f2fcbac11",
    +              "schemaAwareHash": "84a7305d62d79b5bbca976c5522d6b32c5bbcbf76b495e4430f9cdcb51c80a57",
                   "serviceName": "Subgraph1",
                   "variableUsages": []
                 },
    @@ -73,7 +73,7 @@ expression: response
                             "typeCondition": "U"
                           }
                         ],
    -                    "schemaAwareHash": "1fdff97ad7facf07690c3e75e3dc7f1b11ff509268ef999250912a728e7a94c9",
    +                    "schemaAwareHash": "acb960692b01a756fcc627cafef1c47ead8afa60fa70828e5011ba9f825218ab",
                         "serviceName": "Subgraph2",
                         "variableUsages": []
                       },
    @@ -125,7 +125,7 @@ expression: response
                             "typeCondition": "U"
                           }
                         ],
    -                    "schemaAwareHash": "c9c571eac5df81ff34e5e228934d029ed322640c97ab6ad061cbee3cd81040dc",
    +                    "schemaAwareHash": "9fd65f6f213899810bce20180de6754354a25dc3c1bc97d0b7214a177cf8b0bb",
                         "serviceName": "Subgraph1",
                         "variableUsages": [
                           "contextualArgument_1_0"
    diff --git a/apollo-router/tests/snapshots/set_context__set_context_with_null.snap b/apollo-router/tests/snapshots/set_context__set_context_with_null.snap
    index 1e361f0a83..badc32bc8a 100644
    --- a/apollo-router/tests/snapshots/set_context__set_context_with_null.snap
    +++ b/apollo-router/tests/snapshots/set_context__set_context_with_null.snap
    @@ -29,7 +29,7 @@ expression: response
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "19bd66a3ecc2d9495dffce2279774de3275cb027254289bb61b0c1937a7738b4",
    +              "schemaAwareHash": "4c0c9f83a57e9a50ff1f6dd601ec0a1588f1485d5cfb1015822af4017263e807",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -82,7 +82,7 @@ expression: response
                         "renameKeyTo": "contextualArgument_1_0"
                       }
                     ],
    -                "schemaAwareHash": "010ba25ca76f881bd9f0d5e338f9c07829d4d00e183828b6577d593aea0cf21e",
    +                "schemaAwareHash": "8db802e78024d406645f1ddc8972255e917bc738bfbed281691a45e34c92debb",
                     "authorization": {
                       "is_authenticated": false,
                       "scopes": [],
    diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_disabled.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_disabled.snap
    index 84b137aa01..224cd2fb09 100644
    --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_disabled.snap
    +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_disabled.snap
    @@ -79,7 +79,7 @@ expression: response
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "0144f144d271437ed45f9d20706be86ffbf1e124d77c7add3db17d4a1498ce97",
    +              "schemaAwareHash": "5201830580c9c5fadd9c59aea072878f84465c1ae9d905207fa281aa7c1d5340",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -137,7 +137,7 @@ expression: response
                     "inputRewrites": null,
                     "outputRewrites": null,
                     "contextRewrites": null,
    -                "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b",
    +                "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4",
                     "authorization": {
                       "is_authenticated": false,
                       "scopes": [],
    diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled.snap
    index e41aeefee5..da66cee5c2 100644
    --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled.snap
    +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled.snap
    @@ -79,7 +79,7 @@ expression: response
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "0144f144d271437ed45f9d20706be86ffbf1e124d77c7add3db17d4a1498ce97",
    +              "schemaAwareHash": "5201830580c9c5fadd9c59aea072878f84465c1ae9d905207fa281aa7c1d5340",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -141,7 +141,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b",
    +                    "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    @@ -201,7 +201,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "8ee58ad8b4823bcbda9126d2565e1cb04bf91ff250b1098476a1d7614a870121",
    +                    "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_generate_query_fragments.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_generate_query_fragments.snap
    index d92517b39d..e5e2cc616a 100644
    --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_generate_query_fragments.snap
    +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_generate_query_fragments.snap
    @@ -79,7 +79,7 @@ expression: response
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "844dc4e409cdca1334abe37c347bd4e330123078dd7e65bda8dbb57ea5bdf59c",
    +              "schemaAwareHash": "0e1644746fe4beab7def35ec8cc12bde39874c6bb8b9dfd928456196b814a111",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -141,7 +141,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "ad82ce0af279c6a012d6b349ff823ba1467902223312aed1cdfc494ec3100b3e",
    +                    "schemaAwareHash": "6510f6b9672829bd9217618b78ef6f329fbddb125f88184d04e6faaa982ff8bb",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    @@ -201,7 +201,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "7c267302cf4a44a4463820237830155ab50be32c8860371d8a5c8ca905476360",
    +                    "schemaAwareHash": "6bc34c108f7cf81896971bffad76dc5275d46231b4dfe492ccc205dda9a4aa16",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list.snap
    index acffc62599..9d70336225 100644
    --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list.snap
    +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list.snap
    @@ -141,7 +141,7 @@ expression: response
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "1343b4972ec8be54afe990c69711ce790992a814f9654e34e2ee2b25e4097e45",
    +              "schemaAwareHash": "51a7aadec14b66d9f6c737be7418bac0be1af89fcc55dac55d9e9b125bc3682d",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -204,7 +204,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b",
    +                    "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    @@ -265,7 +265,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "8ee58ad8b4823bcbda9126d2565e1cb04bf91ff250b1098476a1d7614a870121",
    +                    "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list_of_list.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list_of_list.snap
    index 2b8feaafc3..5a6a4b30bc 100644
    --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list_of_list.snap
    +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list_of_list.snap
    @@ -145,7 +145,7 @@ expression: response
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "3698f4e74ead34f43a949e1e8459850337a1a07245f8ed627b9203904b4cfff4",
    +              "schemaAwareHash": "e6f45a784fb669930586f13fc587f55798089a87ee4b23a7d1736e0516367a6a",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -209,7 +209,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b",
    +                    "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    @@ -271,7 +271,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "8ee58ad8b4823bcbda9126d2565e1cb04bf91ff250b1098476a1d7614a870121",
    +                    "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_shouldnt_make_article_fetch.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_shouldnt_make_article_fetch.snap
    index 5020d447b4..acd8fb6676 100644
    --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_shouldnt_make_article_fetch.snap
    +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_shouldnt_make_article_fetch.snap
    @@ -54,7 +54,7 @@ expression: response
                   "inputRewrites": null,
                   "outputRewrites": null,
                   "contextRewrites": null,
    -              "schemaAwareHash": "0144f144d271437ed45f9d20706be86ffbf1e124d77c7add3db17d4a1498ce97",
    +              "schemaAwareHash": "5201830580c9c5fadd9c59aea072878f84465c1ae9d905207fa281aa7c1d5340",
                   "authorization": {
                     "is_authenticated": false,
                     "scopes": [],
    @@ -116,7 +116,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b",
    +                    "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    @@ -176,7 +176,7 @@ expression: response
                         "inputRewrites": null,
                         "outputRewrites": null,
                         "contextRewrites": null,
    -                    "schemaAwareHash": "8ee58ad8b4823bcbda9126d2565e1cb04bf91ff250b1098476a1d7614a870121",
    +                    "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7",
                         "authorization": {
                           "is_authenticated": false,
                           "scopes": [],
    
    From 76480c65ac5e46df9b04f799e27f3e922aa2d07e Mon Sep 17 00:00:00 2001
    From: Jesse Rosenberger 
    Date: Tue, 27 Aug 2024 21:50:46 +0300
    Subject: [PATCH 098/108] Apply suggestions from code review
    
    Co-authored-by: Edward Huang 
    ---
     .changesets/feat_candle_exhale_deodorant_weeds.md      |  6 +++---
     .../feat_enabling_both_best_effort_query_planners.md   |  2 +-
     .changesets/feat_geal_return_response_with_errors.md   |  4 ++--
     .changesets/feat_tninesling_cost_directives.md         | 10 ++++++----
     .changesets/fix_bryn_revert_5703.md                    |  4 ++--
     .changesets/fix_customer_snore_infant_wrap.md          |  4 ++--
     .changesets/fix_geal_subgraph_error_path.md            |  4 ++--
     .changesets/fix_geal_test_private_info_caching.md      |  4 ++--
     .changesets/fix_renee_operation_variables.md           |  4 ++--
     .changesets/fix_tninesling_cost_result_filtering.md    |  4 ++--
     .../fix_tninesling_demand_control_score_arguments.md   |  2 +-
     11 files changed, 25 insertions(+), 23 deletions(-)
    
    diff --git a/.changesets/feat_candle_exhale_deodorant_weeds.md b/.changesets/feat_candle_exhale_deodorant_weeds.md
    index 4e86c7c6f7..00b59a4e3f 100644
    --- a/.changesets/feat_candle_exhale_deodorant_weeds.md
    +++ b/.changesets/feat_candle_exhale_deodorant_weeds.md
    @@ -15,11 +15,11 @@ telemetry:
               level: info
               on: request
               attributes:
    -            subgraph.response.status:
    -              subgraph_response_status: code # This is a first warning because you can't access to the response if you're at the request stage
    +            subgraph.response.status: code
    +              # Warning: should use selector for subgraph_name: true instead of comparing strings of subgraph_name and product
               condition:
                 eq:
    -            - subgraph_name # Another warning because instead of writing subgraph_name: true which is the selector, you're asking for a comparison between 2 strings ("subgraph_name" and "product")
    +            - subgraph_name
                 - product
     ```
     
    diff --git a/.changesets/feat_enabling_both_best_effort_query_planners.md b/.changesets/feat_enabling_both_best_effort_query_planners.md
    index 75b236dc98..ee636378b0 100644
    --- a/.changesets/feat_enabling_both_best_effort_query_planners.md
    +++ b/.changesets/feat_enabling_both_best_effort_query_planners.md
    @@ -1,4 +1,4 @@
    -### Enable native (rust) query planner to run in the background ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), [PR #5860](https://github.com/apollographql/router/pull/5860))
    +### Enable native query planner to run in the background ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), [PR #5860](https://github.com/apollographql/router/pull/5860))
     
     The router now schedules background jobs to run the native query planner in
     order to compare its results to the legacy implementation. This is one of the
    diff --git a/.changesets/feat_geal_return_response_with_errors.md b/.changesets/feat_geal_return_response_with_errors.md
    index 5d83ad3fa1..0697a0ced8 100644
    --- a/.changesets/feat_geal_return_response_with_errors.md
    +++ b/.changesets/feat_geal_return_response_with_errors.md
    @@ -1,5 +1,5 @@
    -### Entity cache: return cached entities with errors ([PR #5776](https://github.com/apollographql/router/pull/5776))
    +### Entity cache returns cached entities with errors ([PR #5776](https://github.com/apollographql/router/pull/5776))
     
    -If we are requesting entities from a subgraph, where some of them are present in cache, and the subgraph is unavailable (ex: network issue), we want to return a response with the entities we got from the cache, other entities nullified, and an error pointing at the paths of unavailable entities.
    +When requesting entities from a subgraph where some entities are cached but the subgraph is unavailable (for example, due to a network issue), the router now returns a response with the cached entities retrieved, the unavailable entities nullified, and an error pointing at the paths of the unavailable entities.
     
     By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5776
    \ No newline at end of file
    diff --git a/.changesets/feat_tninesling_cost_directives.md b/.changesets/feat_tninesling_cost_directives.md
    index e7994edc84..07b05cbdf6 100644
    --- a/.changesets/feat_tninesling_cost_directives.md
    +++ b/.changesets/feat_tninesling_cost_directives.md
    @@ -1,8 +1,8 @@
    -### Account for demand control directives when scoring operations ([PR #5777](https://github.com/apollographql/router/pull/5777))
    +### Support demand control directives ([PR #5777](https://github.com/apollographql/router/pull/5777))
     
    -When scoring operations in the demand control plugin, utilize applications of `@cost` and `@listSize` from the supergraph schema to make better cost estimates.
    +The router supports two new demand control directives, `@cost` and `@listSize`, that you can use to provide more accurate estimates of GraphQL operation costs to the router's demand control plugin.
     
    -For expensive resolvers, the `@cost` directive can override the default weights in the cost calculation.
    +Use the `@cost` directive to customize the weights of operation cost calculations, particularly for expensive resolvers.
     
     ```graphql
     type Product {
    @@ -12,7 +12,7 @@ type Product {
     }
     ```
     
    -Additionally, if a list field's length differs significantly from the globally-configured list size, the `@listSize` directive can provide a tighter size estimate.
    +Use the `@listSize` directive to provide a more accurate estimate for the size of a specific list field, particularly for those that differ greatly from the global list size estimate.
     
     ```graphql
     type Magazine {
    @@ -25,4 +25,6 @@ type Magazine {
     }
     ```
     
    +To learn more, go to [Demand Control](https://www.apollographql.com/docs/router/executing-operations/demand-control/) docs.
    +
     By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5777
    diff --git a/.changesets/fix_bryn_revert_5703.md b/.changesets/fix_bryn_revert_5703.md
    index 56145fc3cd..bc072c4a8b 100644
    --- a/.changesets/fix_bryn_revert_5703.md
    +++ b/.changesets/fix_bryn_revert_5703.md
    @@ -1,5 +1,5 @@
    -### Datadog underreported APM metrics ([PR #5780](https://github.com/apollographql/router/pull/5780))
    +### Fix Datadog underreporting APM metrics ([PR #5780](https://github.com/apollographql/router/pull/5780))
     
    -This reverts [PR #5703](https://github.com/apollographql/router/pull/5703) which causes Datadog APM span metrics to be under-reported.
    +The previous [PR #5703](https://github.com/apollographql/router/pull/5703) has been reverted in this release because it caused Datadog to underreport APM span metrics.
     
     By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5780
    diff --git a/.changesets/fix_customer_snore_infant_wrap.md b/.changesets/fix_customer_snore_infant_wrap.md
    index ead868d4de..c69784386d 100644
    --- a/.changesets/fix_customer_snore_infant_wrap.md
    +++ b/.changesets/fix_customer_snore_infant_wrap.md
    @@ -1,5 +1,5 @@
    -### Allow to use progressive override with federation 2.7 and above ([PR #5754](https://github.com/apollographql/router/pull/5754))
    +### Enable progressive override with federation 2.7 and above ([PR #5754](https://github.com/apollographql/router/pull/5754))
     
    -The progressive override feature is now properly available using federation 2.7 and above.
    +The progressive override feature is now available when using Federation v2.7 and above.
     
     By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/5754
    diff --git a/.changesets/fix_geal_subgraph_error_path.md b/.changesets/fix_geal_subgraph_error_path.md
    index 21c32032eb..f68b129de2 100644
    --- a/.changesets/fix_geal_subgraph_error_path.md
    +++ b/.changesets/fix_geal_subgraph_error_path.md
    @@ -1,5 +1,5 @@
    -### set the subgraph error path if not present ([PR #5773](https://github.com/apollographql/router/pull/5773))
    +### Set subgraph error path if not present ([PR #5773](https://github.com/apollographql/router/pull/5773))
     
    -This fixes subgraph response conversion to set the error path in all cases. For some network level errors, the subgraph service was not setting the path
    +The router now sets the error path in all cases during subgraph response conversion. Previously the router's subgraph service didn't set the error path for some network-level errors.
     
     By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5773
    \ No newline at end of file
    diff --git a/.changesets/fix_geal_test_private_info_caching.md b/.changesets/fix_geal_test_private_info_caching.md
    index 22a83ff8b2..94c394758c 100644
    --- a/.changesets/fix_geal_test_private_info_caching.md
    +++ b/.changesets/fix_geal_test_private_info_caching.md
    @@ -1,5 +1,5 @@
    -### Entity cache fix: update the cache key with private info on the first call ([PR #5599](https://github.com/apollographql/router/pull/5599))
    +### Fix private information caching in entity cache ([PR #5599](https://github.com/apollographql/router/pull/5599))
     
    -This adds a test for private information caching and fixes an issue where private data was stored at the wrong key, so it did not appear to be cached
    +The router previously had an issue where private data could be stored at the wrong key, resulting in the data not appearing to be cached. This has been fixed by updating the cache key with the private data. 
     
     By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5599
    \ No newline at end of file
    diff --git a/.changesets/fix_renee_operation_variables.md b/.changesets/fix_renee_operation_variables.md
    index 99a795a190..5b72677432 100644
    --- a/.changesets/fix_renee_operation_variables.md
    +++ b/.changesets/fix_renee_operation_variables.md
    @@ -1,8 +1,8 @@
     ### Fix GraphQL query directives validation bug ([PR #5753](https://github.com/apollographql/router/pull/5753))
     
    -GraphQL supports an obscure syntax, where a variable is used in a directive application on the same operation where the variable is declared.
    +The router now supports GraphQL queries where a variable is used in a directive on the same operation where the variable is declared. 
     
    -The router used to reject queries like this, but now they are accepted:
    +For example, the following query both declares and uses `$var`: 
     
     ```graphql
     query GetSomething($var: Int!) @someDirective(argument: $var) {
    diff --git a/.changesets/fix_tninesling_cost_result_filtering.md b/.changesets/fix_tninesling_cost_result_filtering.md
    index c1a773e134..57bdfbe67f 100644
    --- a/.changesets/fix_tninesling_cost_result_filtering.md
    +++ b/.changesets/fix_tninesling_cost_result_filtering.md
    @@ -1,6 +1,6 @@
     ### Fix cost result filtering for custom metrics ([PR #5838](https://github.com/apollographql/router/pull/5838))
     
    -Fix filtering for custom metrics that use demand control cost information in their conditions. This allows a telemetry config such as:
    +The router can now filter for custom metrics that use demand control cost information in their conditions. This allows a telemetry config such as the following:
     
     ```yaml
     telemetry:
    @@ -19,7 +19,7 @@ telemetry:
                   - "COST_ESTIMATED_TOO_EXPENSIVE"
     ```
     
    -Additionally, this fixes an issue with attribute comparisons which would silently fail to compare integers to float values. Now, users can write integer values in conditions that compare against selectors that select floats:
    +This also fixes an issue where attribute comparisons would fail silently when comparing integers to float values. Users can now write integer values in conditions that compare against selectors that select floats:
     
     ```yaml
     telemetry:
    diff --git a/.changesets/fix_tninesling_demand_control_score_arguments.md b/.changesets/fix_tninesling_demand_control_score_arguments.md
    index a0c9e2e5d2..200523fb00 100644
    --- a/.changesets/fix_tninesling_demand_control_score_arguments.md
    +++ b/.changesets/fix_tninesling_demand_control_score_arguments.md
    @@ -1,5 +1,5 @@
     ### Add argument cost to type cost in demand control scoring algorithm ([PR #5740](https://github.com/apollographql/router/pull/5740))
     
    -When scoring operations in the demand control plugin, include field arguments in the type cost.
    +The router's operation scoring algorithm for demand control now includes field arguments in the type cost.
     
     By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5740
    
    From 21f3f1aac2e7db1c99d3fc918c309f6017f68df3 Mon Sep 17 00:00:00 2001
    From: Edward Huang 
    Date: Tue, 27 Aug 2024 13:05:47 -0700
    Subject: [PATCH 099/108] remove entity caching changesets (#5901)
    
    ---
     .../exp_geal_entity_cache_documentation.md    | 27 -------------------
     .../feat_geal_return_response_with_errors.md  |  5 ----
     .../fix_geal_test_private_info_caching.md     |  5 ----
     3 files changed, 37 deletions(-)
     delete mode 100644 .changesets/exp_geal_entity_cache_documentation.md
     delete mode 100644 .changesets/feat_geal_return_response_with_errors.md
     delete mode 100644 .changesets/fix_geal_test_private_info_caching.md
    
    diff --git a/.changesets/exp_geal_entity_cache_documentation.md b/.changesets/exp_geal_entity_cache_documentation.md
    deleted file mode 100644
    index 546cc6cd40..0000000000
    --- a/.changesets/exp_geal_entity_cache_documentation.md
    +++ /dev/null
    @@ -1,27 +0,0 @@
    -### Entity cache preview ([PR #5574](https://github.com/apollographql/router/pull/5574))
    -
    -#### Support private information caching
    -
    -The router supports a new `private_id` option that enables separate, private cache entries to be allocated per user for authenticated requests.
    -
    -When a subgraph returns a `Cache-Control: private` header, the response data shouldn't be cached and shared among users. However, since the router supports request authentication, it can use it to allocate separate cache entries per users. 
    -
    -To enable this, configure the `private_id` to be the name of a key in the request context that contains the data that's used to differentiate users. This option must be paired with a coprocessor or Rhai script to set the value in context.
    -
    -Example configuration:
    -
    -```yaml title="router.yaml"
    -# Enable entity caching globally
    -preview_entity_cache:
    -  enabled: true
    -  subgraph:
    -    all:
    -      enabled: true
    -      accounts:
    -        private_id: "user_id"
    -```
    -
    -
    -To learn more about configuring and customizing private information caching, go to [Private information caching](https://www.apollographql.com/docs/router/configuration/entity-caching/#private-information-caching) docs.
    - 
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5574
    \ No newline at end of file
    diff --git a/.changesets/feat_geal_return_response_with_errors.md b/.changesets/feat_geal_return_response_with_errors.md
    deleted file mode 100644
    index 0697a0ced8..0000000000
    --- a/.changesets/feat_geal_return_response_with_errors.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### Entity cache returns cached entities with errors ([PR #5776](https://github.com/apollographql/router/pull/5776))
    -
    -When requesting entities from a subgraph where some entities are cached but the subgraph is unavailable (for example, due to a network issue), the router now returns a response with the cached entities retrieved, the unavailable entities nullified, and an error pointing at the paths of the unavailable entities.
    -
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5776
    \ No newline at end of file
    diff --git a/.changesets/fix_geal_test_private_info_caching.md b/.changesets/fix_geal_test_private_info_caching.md
    deleted file mode 100644
    index 94c394758c..0000000000
    --- a/.changesets/fix_geal_test_private_info_caching.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### Fix private information caching in entity cache ([PR #5599](https://github.com/apollographql/router/pull/5599))
    -
    -The router previously had an issue where private data could be stored at the wrong key, resulting in the data not appearing to be cached. This has been fixed by updating the cache key with the private data. 
    -
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5599
    \ No newline at end of file
    
    From 9ba1071748b8a581de772e8fb4c9e0ec5690c1ee Mon Sep 17 00:00:00 2001
    From: Taylor Ninesling 
    Date: Tue, 27 Aug 2024 22:47:28 -0700
    Subject: [PATCH 100/108] Update to router-bridge@0.6.0+v2.9.0 (#5902)
    
    ---
     Cargo.lock                               | 32 +++---------------------
     apollo-router/Cargo.toml                 |  2 +-
     apollo-router/tests/integration/redis.rs | 14 +++++------
     fuzz/Cargo.toml                          |  2 +-
     4 files changed, 13 insertions(+), 37 deletions(-)
    
    diff --git a/Cargo.lock b/Cargo.lock
    index 9dcb9fe8d3..5c503b2eb9 100644
    --- a/Cargo.lock
    +++ b/Cargo.lock
    @@ -335,7 +335,7 @@ dependencies = [
      "reqwest",
      "rhai",
      "rmp",
    - "router-bridge 0.6.0-beta.1+v2.9.0-beta.0",
    + "router-bridge",
      "rowan",
      "rstack",
      "rust-embed",
    @@ -5781,33 +5781,9 @@ dependencies = [
     
     [[package]]
     name = "router-bridge"
    -version = "0.5.31+v2.8.5"
    +version = "0.6.0+v2.9.0"
     source = "registry+https://github.com/rust-lang/crates.io-index"
    -checksum = "672901b1ec6fd110ac41d61ca5e1754319d0edf39546a089a114ab865d42ae97"
    -dependencies = [
    - "anyhow",
    - "async-channel 1.9.0",
    - "deno_console",
    - "deno_core",
    - "deno_url",
    - "deno_web",
    - "deno_webidl",
    - "rand 0.8.5",
    - "serde",
    - "serde_json",
    - "thiserror",
    - "tokio",
    - "tower",
    - "tower-service",
    - "tracing",
    - "which",
    -]
    -
    -[[package]]
    -name = "router-bridge"
    -version = "0.6.0-beta.1+v2.9.0-beta.0"
    -source = "registry+https://github.com/rust-lang/crates.io-index"
    -checksum = "349dcc3134916c7888f2ebbb5c66fefa2693a1f9ff522e04672abb895b66cb9b"
    +checksum = "96ef4910ade6753863c8437a76e88e236ab91688dcfe739d73417ae7848f3b92"
     dependencies = [
      "anyhow",
      "async-channel 1.9.0",
    @@ -5842,7 +5818,7 @@ dependencies = [
      "libfuzzer-sys",
      "log",
      "reqwest",
    - "router-bridge 0.5.31+v2.8.5",
    + "router-bridge",
      "schemars",
      "serde",
      "serde_json",
    diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
    index d1e334a624..1e8e8fb518 100644
    --- a/apollo-router/Cargo.toml
    +++ b/apollo-router/Cargo.toml
    @@ -197,7 +197,7 @@ regex = "1.10.5"
     reqwest.workspace = true
     
     # note: this dependency should _always_ be pinned, prefix the version with an `=`
    -router-bridge = "=0.6.0-beta.1+v2.9.0-beta.0"
    +router-bridge = "=0.6.0+v2.9.0"
     
     rust-embed = { version = "8.4.0", features = ["include-exclude"] }
     rustls = "0.21.12"
    diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs
    index 75a812d202..f95ba37c65 100644
    --- a/apollo-router/tests/integration/redis.rs
    +++ b/apollo-router/tests/integration/redis.rs
    @@ -26,7 +26,7 @@ async fn query_planner_cache() -> Result<(), BoxError> {
         // 2. run `docker compose up -d` and connect to the redis container by running `docker-compose exec redis /bin/bash`.
         // 3. Run the `redis-cli` command from the shell and start the redis `monitor` command.
         // 4. Run this test and yank the updated cache key from the redis logs.
    -    let known_cache_key = "plan:0:v2.9.0-beta.0:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:8ecc6cbc98bab2769e6666a72ba47a4ebd90e6f62256ddcbdc7f352a805e0fe6";
    +    let known_cache_key = "plan:0:v2.9.0:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:8ecc6cbc98bab2769e6666a72ba47a4ebd90e6f62256ddcbdc7f352a805e0fe6";
     
         let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap();
         let client = RedisClient::new(config, None, None, None);
    @@ -921,7 +921,7 @@ async fn connection_failure_blocks_startup() {
     async fn query_planner_redis_update_query_fragments() {
         test_redis_query_plan_config_update(
             include_str!("fixtures/query_planner_redis_config_update_query_fragments.router.yaml"),
    -        "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:cda2b4e476fdce9c4c435627b26cedd177cfbe04ab335fc3e3d895c0d79d965e",
    +        "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:cda2b4e476fdce9c4c435627b26cedd177cfbe04ab335fc3e3d895c0d79d965e",
         )
         .await;
     }
    @@ -940,7 +940,7 @@ async fn query_planner_redis_update_planner_mode() {
     async fn query_planner_redis_update_introspection() {
         test_redis_query_plan_config_update(
             include_str!("fixtures/query_planner_redis_config_update_introspection.router.yaml"),
    -        "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:259dd917e4de09b5469629849b91e8ffdfbed2587041fad68b5963369bb13283",
    +        "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:259dd917e4de09b5469629849b91e8ffdfbed2587041fad68b5963369bb13283",
         )
         .await;
     }
    @@ -949,7 +949,7 @@ async fn query_planner_redis_update_introspection() {
     async fn query_planner_redis_update_defer() {
         test_redis_query_plan_config_update(
             include_str!("fixtures/query_planner_redis_config_update_defer.router.yaml"),
    -        "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:e4376fe032160ce16399e520c6e815da6cb5cf4dc94a06175b86b64a9bf80201",
    +        "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:e4376fe032160ce16399e520c6e815da6cb5cf4dc94a06175b86b64a9bf80201",
         )
         .await;
     }
    @@ -960,7 +960,7 @@ async fn query_planner_redis_update_type_conditional_fetching() {
             include_str!(
                 "fixtures/query_planner_redis_config_update_type_conditional_fetching.router.yaml"
             ),
    -        "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:83d899fcb42d2202c39fc8350289b8247021da00ecf3d844553c190c49410507",
    +        "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:83d899fcb42d2202c39fc8350289b8247021da00ecf3d844553c190c49410507",
         )
         .await;
     }
    @@ -971,7 +971,7 @@ async fn query_planner_redis_update_reuse_query_fragments() {
             include_str!(
                 "fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml"
             ),
    -        "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:d48f92f892bd67071694c0538a7e657ff8e0c52e1718f475190c17b503e9e8c3",
    +        "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:d48f92f892bd67071694c0538a7e657ff8e0c52e1718f475190c17b503e9e8c3",
         )
         .await;
     }
    @@ -994,7 +994,7 @@ async fn test_redis_query_plan_config_update(updated_config: &str, new_cache_key
         router.assert_started().await;
         router.clear_redis_cache().await;
     
    -    let starting_key = "plan:0:v2.9.0-beta.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0966f1528d47cee30b6140a164be16148dd360ee10b87744991e9d35af8e8a27";
    +    let starting_key = "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0966f1528d47cee30b6140a164be16148dd360ee10b87744991e9d35af8e8a27";
         router.execute_default_query().await;
         router.assert_redis_cache_contains(starting_key, None).await;
         router.update_config(updated_config).await;
    diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
    index 15eff54993..f01ccac721 100644
    --- a/fuzz/Cargo.toml
    +++ b/fuzz/Cargo.toml
    @@ -20,7 +20,7 @@ reqwest = { workspace = true, features = ["json", "blocking"] }
     serde_json.workspace = true
     tokio.workspace = true
     # note: this dependency should _always_ be pinned, prefix the version with an `=`
    -router-bridge = "=0.5.31+v2.8.5"
    +router-bridge = "=0.6.0+v2.9.0"
     
     [dev-dependencies]
     anyhow = "1"
    
    From c91be1da16d95b092c25a549376c4e150168006f Mon Sep 17 00:00:00 2001
    From: Jesse Rosenberger 
    Date: Wed, 28 Aug 2024 08:50:25 +0300
    Subject: [PATCH 101/108] prep release: v1.53.0-rc.1
    
    ---
     Cargo.lock                                    |    8 +-
     apollo-federation/Cargo.toml                  |    2 +-
     apollo-router-benchmarks/Cargo.toml           |    2 +-
     apollo-router-scaffold/Cargo.toml             |    2 +-
     .../templates/base/Cargo.template.toml        |    2 +-
     .../templates/base/xtask/Cargo.template.toml  |    2 +-
     apollo-router/Cargo.toml                      |    4 +-
     .../tracing/docker-compose.datadog.yml        |    2 +-
     dockerfiles/tracing/docker-compose.jaeger.yml |    2 +-
     dockerfiles/tracing/docker-compose.zipkin.yml |    2 +-
     helm/chart/router/Chart.yaml                  |    4 +-
     helm/chart/router/README.md                   |    6 +-
     licenses.html                                 | 2916 ++++++++---------
     scripts/install.sh                            |    2 +-
     14 files changed, 1304 insertions(+), 1652 deletions(-)
    
    diff --git a/Cargo.lock b/Cargo.lock
    index 5c503b2eb9..ac59d9dd47 100644
    --- a/Cargo.lock
    +++ b/Cargo.lock
    @@ -178,7 +178,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-federation"
    -version = "1.53.0-rc.0"
    +version = "1.53.0-rc.1"
     dependencies = [
      "apollo-compiler",
      "derive_more",
    @@ -229,7 +229,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router"
    -version = "1.53.0-rc.0"
    +version = "1.53.0-rc.1"
     dependencies = [
      "access-json",
      "ahash",
    @@ -398,7 +398,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router-benchmarks"
    -version = "1.53.0-rc.0"
    +version = "1.53.0-rc.1"
     dependencies = [
      "apollo-parser",
      "apollo-router",
    @@ -414,7 +414,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router-scaffold"
    -version = "1.53.0-rc.0"
    +version = "1.53.0-rc.1"
     dependencies = [
      "anyhow",
      "cargo-scaffold",
    diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml
    index c03290d5dd..0f62efa5b6 100644
    --- a/apollo-federation/Cargo.toml
    +++ b/apollo-federation/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-federation"
    -version = "1.53.0-rc.0"
    +version = "1.53.0-rc.1"
     authors = ["The Apollo GraphQL Contributors"]
     edition = "2021"
     description = "Apollo Federation"
    diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml
    index cbe2cf5a06..4c84421402 100644
    --- a/apollo-router-benchmarks/Cargo.toml
    +++ b/apollo-router-benchmarks/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router-benchmarks"
    -version = "1.53.0-rc.0"
    +version = "1.53.0-rc.1"
     authors = ["Apollo Graph, Inc. "]
     edition = "2021"
     license = "Elastic-2.0"
    diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml
    index 270a69c5c3..b4217e82b5 100644
    --- a/apollo-router-scaffold/Cargo.toml
    +++ b/apollo-router-scaffold/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router-scaffold"
    -version = "1.53.0-rc.0"
    +version = "1.53.0-rc.1"
     authors = ["Apollo Graph, Inc. "]
     edition = "2021"
     license = "Elastic-2.0"
    diff --git a/apollo-router-scaffold/templates/base/Cargo.template.toml b/apollo-router-scaffold/templates/base/Cargo.template.toml
    index 7a70c7e031..b66b95c69c 100644
    --- a/apollo-router-scaffold/templates/base/Cargo.template.toml
    +++ b/apollo-router-scaffold/templates/base/Cargo.template.toml
    @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" }
     apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
     {{else}}
     # Note if you update these dependencies then also update xtask/Cargo.toml
    -apollo-router = "1.53.0-rc.0"
    +apollo-router = "1.53.0-rc.1"
     {{/if}}
     {{/if}}
     async-trait = "0.1.52"
    diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml
    index 49b67b124a..e3fa00ed97 100644
    --- a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml
    +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml
    @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" }
     {{#if branch}}
     apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
     {{else}}
    -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.53.0-rc.0" }
    +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.53.0-rc.1" }
     {{/if}}
     {{/if}}
     anyhow = "1.0.58"
    diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
    index 1e8e8fb518..6265d716fa 100644
    --- a/apollo-router/Cargo.toml
    +++ b/apollo-router/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router"
    -version = "1.53.0-rc.0"
    +version = "1.53.0-rc.1"
     authors = ["Apollo Graph, Inc. "]
     repository = "https://github.com/apollographql/router/"
     documentation = "https://docs.rs/apollo-router"
    @@ -68,7 +68,7 @@ askama = "0.12.1"
     access-json = "0.1.0"
     anyhow = "1.0.86"
     apollo-compiler.workspace = true
    -apollo-federation = { path = "../apollo-federation", version = "=1.53.0-rc.0" }
    +apollo-federation = { path = "../apollo-federation", version = "=1.53.0-rc.1" }
     arc-swap = "1.6.0"
     async-channel = "1.9.0"
     async-compression = { version = "0.4.6", features = [
    diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml
    index cb42c32e3e..3043bbd6ff 100644
    --- a/dockerfiles/tracing/docker-compose.datadog.yml
    +++ b/dockerfiles/tracing/docker-compose.datadog.yml
    @@ -3,7 +3,7 @@ services:
     
       apollo-router:
         container_name: apollo-router
    -    image: ghcr.io/apollographql/router:v1.53.0-rc.0
    +    image: ghcr.io/apollographql/router:v1.53.0-rc.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/datadog.router.yaml:/etc/config/configuration.yaml
    diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml
    index 44fdc1d490..f8364b6fbc 100644
    --- a/dockerfiles/tracing/docker-compose.jaeger.yml
    +++ b/dockerfiles/tracing/docker-compose.jaeger.yml
    @@ -4,7 +4,7 @@ services:
       apollo-router:
         container_name: apollo-router
         #build: ./router
    -    image: ghcr.io/apollographql/router:v1.53.0-rc.0
    +    image: ghcr.io/apollographql/router:v1.53.0-rc.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/jaeger.router.yaml:/etc/config/configuration.yaml
    diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml
    index f4520a9e7f..42ac11aa6d 100644
    --- a/dockerfiles/tracing/docker-compose.zipkin.yml
    +++ b/dockerfiles/tracing/docker-compose.zipkin.yml
    @@ -4,7 +4,7 @@ services:
       apollo-router:
         container_name: apollo-router
         build: ./router
    -    image: ghcr.io/apollographql/router:v1.53.0-rc.0
    +    image: ghcr.io/apollographql/router:v1.53.0-rc.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/zipkin.router.yaml:/etc/config/configuration.yaml
    diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml
    index 386ba56186..58f0f433e7 100644
    --- a/helm/chart/router/Chart.yaml
    +++ b/helm/chart/router/Chart.yaml
    @@ -20,10 +20,10 @@ type: application
     # so it matches the shape of our release process and release automation.
     # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix
     # of "v" is not included.
    -version: 1.53.0-rc.0
    +version: 1.53.0-rc.1
     
     # This is the version number of the application being deployed. This version number should be
     # incremented each time you make changes to the application. Versions are not expected to
     # follow Semantic Versioning. They should reflect the version the application is using.
     # It is recommended to use it with quotes.
    -appVersion: "v1.53.0-rc.0"
    +appVersion: "v1.53.0-rc.1"
    diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md
    index 284ceec2e5..802b6ddd61 100644
    --- a/helm/chart/router/README.md
    +++ b/helm/chart/router/README.md
    @@ -2,7 +2,7 @@
     
     [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation
     
    -![Version: 1.53.0-rc.0](https://img.shields.io/badge/Version-1.53.0--rc.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.53.0-rc.0](https://img.shields.io/badge/AppVersion-v1.53.0--rc.0-informational?style=flat-square)
    +![Version: 1.53.0-rc.1](https://img.shields.io/badge/Version-1.53.0--rc.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.53.0-rc.1](https://img.shields.io/badge/AppVersion-v1.53.0--rc.1-informational?style=flat-square)
     
     ## Prerequisites
     
    @@ -11,7 +11,7 @@
     ## Get Repo Info
     
     ```console
    -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.0
    +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.1
     ```
     
     ## Install Chart
    @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.0
     **Important:** only helm3 is supported
     
     ```console
    -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.0 --values my-values.yaml
    +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.1 --values my-values.yaml
     ```
     
     _See [configuration](#configuration) below._
    diff --git a/licenses.html b/licenses.html
    index 33dbc39daf..1725f7e24d 100644
    --- a/licenses.html
    +++ b/licenses.html
    @@ -41,13 +41,12 @@
                 

    Third Party Licenses

    This page lists the licenses of the dependencies used in the Apollo router.

    - +

    Overview of licenses:

    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    -                                 Apache License
    -                           Version 2.0, January 2004
    -                        https://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    -
    -      "License" shall mean the terms and conditions for use, reproduction,
    -      and distribution as defined by Sections 1 through 9 of this document.
    -
    -      "Licensor" shall mean the copyright owner or entity authorized by
    -      the copyright owner that is granting the License.
    -
    -      "Legal Entity" shall mean the union of the acting entity and all
    -      other entities that control, are controlled by, or are under common
    -      control with that entity. For the purposes of this definition,
    -      "control" means (i) the power, direct or indirect, to cause the
    -      direction or management of such entity, whether by contract or
    -      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -      outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -      "You" (or "Your") shall mean an individual or Legal Entity
    -      exercising permissions granted by this License.
    -
    -      "Source" form shall mean the preferred form for making modifications,
    -      including but not limited to software source code, documentation
    -      source, and configuration files.
    -
    -      "Object" form shall mean any form resulting from mechanical
    -      transformation or translation of a Source form, including but
    -      not limited to compiled object code, generated documentation,
    -      and conversions to other media types.
    -
    -      "Work" shall mean the work of authorship, whether in Source or
    -      Object form, made available under the License, as indicated by a
    -      copyright notice that is included in or attached to the work
    -      (an example is provided in the Appendix below).
    -
    -      "Derivative Works" shall mean any work, whether in Source or Object
    -      form, that is based on (or derived from) the Work and for which the
    -      editorial revisions, annotations, elaborations, or other modifications
    -      represent, as a whole, an original work of authorship. For the purposes
    -      of this License, Derivative Works shall not include works that remain
    -      separable from, or merely link (or bind by name) to the interfaces of,
    -      the Work and Derivative Works thereof.
    -
    -      "Contribution" shall mean any work of authorship, including
    -      the original version of the Work and any modifications or additions
    -      to that Work or Derivative Works thereof, that is intentionally
    -      submitted to Licensor for inclusion in the Work by the copyright owner
    -      or by an individual or Legal Entity authorized to submit on behalf of
    -      the copyright owner. For the purposes of this definition, "submitted"
    -      means any form of electronic, verbal, or written communication sent
    -      to the Licensor or its representatives, including but not limited to
    -      communication on electronic mailing lists, source code control systems,
    -      and issue tracking systems that are managed by, or on behalf of, the
    -      Licensor for the purpose of discussing and improving the Work, but
    -      excluding communication that is conspicuously marked or otherwise
    -      designated in writing by the copyright owner as "Not a Contribution."
    -
    -      "Contributor" shall mean Licensor and any individual or Legal Entity
    -      on behalf of whom a Contribution has been received by Licensor and
    -      subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      copyright license to reproduce, prepare Derivative Works of,
    -      publicly display, publicly perform, sublicense, and distribute the
    -      Work and such Derivative Works in Source or Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      (except as stated in this section) patent license to make, have made,
    -      use, offer to sell, sell, import, and otherwise transfer the Work,
    -      where such license applies only to those patent claims licensable
    -      by such Contributor that are necessarily infringed by their
    -      Contribution(s) alone or by combination of their Contribution(s)
    -      with the Work to which such Contribution(s) was submitted. If You
    -      institute patent litigation against any entity (including a
    -      cross-claim or counterclaim in a lawsuit) alleging that the Work
    -      or a Contribution incorporated within the Work constitutes direct
    -      or contributory patent infringement, then any patent licenses
    -      granted to You under this License for that Work shall terminate
    -      as of the date such litigation is filed.
    -
    -   4. Redistribution. You may reproduce and distribute copies of the
    -      Work or Derivative Works thereof in any medium, with or without
    -      modifications, and in Source or Object form, provided that You
    -      meet the following conditions:
    -
    -      (a) You must give any other recipients of the Work or
    -          Derivative Works a copy of this License; and
    -
    -      (b) You must cause any modified files to carry prominent notices
    -          stating that You changed the files; and
    -
    -      (c) You must retain, in the Source form of any Derivative Works
    -          that You distribute, all copyright, patent, trademark, and
    -          attribution notices from the Source form of the Work,
    -          excluding those notices that do not pertain to any part of
    -          the Derivative Works; and
    -
    -      (d) If the Work includes a "NOTICE" text file as part of its
    -          distribution, then any Derivative Works that You distribute must
    -          include a readable copy of the attribution notices contained
    -          within such NOTICE file, excluding those notices that do not
    -          pertain to any part of the Derivative Works, in at least one
    -          of the following places: within a NOTICE text file distributed
    -          as part of the Derivative Works; within the Source form or
    -          documentation, if provided along with the Derivative Works; or,
    -          within a display generated by the Derivative Works, if and
    -          wherever such third-party notices normally appear. The contents
    -          of the NOTICE file are for informational purposes only and
    -          do not modify the License. You may add Your own attribution
    -          notices within Derivative Works that You distribute, alongside
    -          or as an addendum to the NOTICE text from the Work, provided
    -          that such additional attribution notices cannot be construed
    -          as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and
    -      may provide additional or different license terms and conditions
    -      for use, reproduction, or distribution of Your modifications, or
    -      for any such Derivative Works as a whole, provided Your use,
    -      reproduction, and distribution of the Work otherwise complies with
    -      the conditions stated in this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise,
    -      any Contribution intentionally submitted for inclusion in the Work
    -      by You to the Licensor shall be under the terms and conditions of
    -      this License, without any additional terms or conditions.
    -      Notwithstanding the above, nothing herein shall supersede or modify
    -      the terms of any separate license agreement you may have executed
    -      with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -      names, trademarks, service marks, or product names of the Licensor,
    -      except as required for reasonable and customary use in describing the
    -      origin of the Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or
    -      agreed to in writing, Licensor provides the Work (and each
    -      Contributor provides its Contributions) on an "AS IS" BASIS,
    -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -      implied, including, without limitation, any warranties or conditions
    -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -      PARTICULAR PURPOSE. You are solely responsible for determining the
    -      appropriateness of using or redistributing the Work and assume any
    -      risks associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory,
    -      whether in tort (including negligence), contract, or otherwise,
    -      unless required by applicable law (such as deliberate and grossly
    -      negligent acts) or agreed to in writing, shall any Contributor be
    -      liable to You for damages, including any direct, indirect, special,
    -      incidental, or consequential damages of any character arising as a
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    -
    -   END OF TERMS AND CONDITIONS
     
  • @@ -3175,7 +2989,6 @@

    Used by:

  • clap_builder
  • clap_derive
  • clap_lex
  • -
  • opentelemetry-proto
                                 Apache License
                            Version 2.0, January 2004
@@ -4664,7 +4477,193 @@ 

Used by:

-
                                 Apache License
+                
                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+ +
  • +

    Apache License 2.0

    +

    Used by:

    + +
                                    Apache License
                                Version 2.0, January 2004
                             http://www.apache.org/licenses/
     
    @@ -4840,239 +4839,53 @@ 

    Used by:

    of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +
  • Apache License 2.0

    Used by:

    -
                                    Apache License
    -                           Version 2.0, January 2004
    -                        http://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +                
                                   Apache License
    +                         Version 2.0, January 2004
    +                      http://www.apache.org/licenses/
     
    -   1. Definitions.
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
     
    -      "License" shall mean the terms and conditions for use, reproduction,
    -      and distribution as defined by Sections 1 through 9 of this document.
    +1. Definitions.
     
    -      "Licensor" shall mean the copyright owner or entity authorized by
    -      the copyright owner that is granting the License.
    +  "License" shall mean the terms and conditions for use, reproduction,
    +  and distribution as defined by Sections 1 through 9 of this document.
     
    -      "Legal Entity" shall mean the union of the acting entity and all
    -      other entities that control, are controlled by, or are under common
    -      control with that entity. For the purposes of this definition,
    -      "control" means (i) the power, direct or indirect, to cause the
    -      direction or management of such entity, whether by contract or
    -      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -      outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -      "You" (or "Your") shall mean an individual or Legal Entity
    -      exercising permissions granted by this License.
    -
    -      "Source" form shall mean the preferred form for making modifications,
    -      including but not limited to software source code, documentation
    -      source, and configuration files.
    -
    -      "Object" form shall mean any form resulting from mechanical
    -      transformation or translation of a Source form, including but
    -      not limited to compiled object code, generated documentation,
    -      and conversions to other media types.
    -
    -      "Work" shall mean the work of authorship, whether in Source or
    -      Object form, made available under the License, as indicated by a
    -      copyright notice that is included in or attached to the work
    -      (an example is provided in the Appendix below).
    -
    -      "Derivative Works" shall mean any work, whether in Source or Object
    -      form, that is based on (or derived from) the Work and for which the
    -      editorial revisions, annotations, elaborations, or other modifications
    -      represent, as a whole, an original work of authorship. For the purposes
    -      of this License, Derivative Works shall not include works that remain
    -      separable from, or merely link (or bind by name) to the interfaces of,
    -      the Work and Derivative Works thereof.
    -
    -      "Contribution" shall mean any work of authorship, including
    -      the original version of the Work and any modifications or additions
    -      to that Work or Derivative Works thereof, that is intentionally
    -      submitted to Licensor for inclusion in the Work by the copyright owner
    -      or by an individual or Legal Entity authorized to submit on behalf of
    -      the copyright owner. For the purposes of this definition, "submitted"
    -      means any form of electronic, verbal, or written communication sent
    -      to the Licensor or its representatives, including but not limited to
    -      communication on electronic mailing lists, source code control systems,
    -      and issue tracking systems that are managed by, or on behalf of, the
    -      Licensor for the purpose of discussing and improving the Work, but
    -      excluding communication that is conspicuously marked or otherwise
    -      designated in writing by the copyright owner as "Not a Contribution."
    -
    -      "Contributor" shall mean Licensor and any individual or Legal Entity
    -      on behalf of whom a Contribution has been received by Licensor and
    -      subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      copyright license to reproduce, prepare Derivative Works of,
    -      publicly display, publicly perform, sublicense, and distribute the
    -      Work and such Derivative Works in Source or Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      (except as stated in this section) patent license to make, have made,
    -      use, offer to sell, sell, import, and otherwise transfer the Work,
    -      where such license applies only to those patent claims licensable
    -      by such Contributor that are necessarily infringed by their
    -      Contribution(s) alone or by combination of their Contribution(s)
    -      with the Work to which such Contribution(s) was submitted. If You
    -      institute patent litigation against any entity (including a
    -      cross-claim or counterclaim in a lawsuit) alleging that the Work
    -      or a Contribution incorporated within the Work constitutes direct
    -      or contributory patent infringement, then any patent licenses
    -      granted to You under this License for that Work shall terminate
    -      as of the date such litigation is filed.
    -
    -   4. Redistribution. You may reproduce and distribute copies of the
    -      Work or Derivative Works thereof in any medium, with or without
    -      modifications, and in Source or Object form, provided that You
    -      meet the following conditions:
    -
    -      (a) You must give any other recipients of the Work or
    -          Derivative Works a copy of this License; and
    -
    -      (b) You must cause any modified files to carry prominent notices
    -          stating that You changed the files; and
    -
    -      (c) You must retain, in the Source form of any Derivative Works
    -          that You distribute, all copyright, patent, trademark, and
    -          attribution notices from the Source form of the Work,
    -          excluding those notices that do not pertain to any part of
    -          the Derivative Works; and
    -
    -      (d) If the Work includes a "NOTICE" text file as part of its
    -          distribution, then any Derivative Works that You distribute must
    -          include a readable copy of the attribution notices contained
    -          within such NOTICE file, excluding those notices that do not
    -          pertain to any part of the Derivative Works, in at least one
    -          of the following places: within a NOTICE text file distributed
    -          as part of the Derivative Works; within the Source form or
    -          documentation, if provided along with the Derivative Works; or,
    -          within a display generated by the Derivative Works, if and
    -          wherever such third-party notices normally appear. The contents
    -          of the NOTICE file are for informational purposes only and
    -          do not modify the License. You may add Your own attribution
    -          notices within Derivative Works that You distribute, alongside
    -          or as an addendum to the NOTICE text from the Work, provided
    -          that such additional attribution notices cannot be construed
    -          as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and
    -      may provide additional or different license terms and conditions
    -      for use, reproduction, or distribution of Your modifications, or
    -      for any such Derivative Works as a whole, provided Your use,
    -      reproduction, and distribution of the Work otherwise complies with
    -      the conditions stated in this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise,
    -      any Contribution intentionally submitted for inclusion in the Work
    -      by You to the Licensor shall be under the terms and conditions of
    -      this License, without any additional terms or conditions.
    -      Notwithstanding the above, nothing herein shall supersede or modify
    -      the terms of any separate license agreement you may have executed
    -      with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -      names, trademarks, service marks, or product names of the Licensor,
    -      except as required for reasonable and customary use in describing the
    -      origin of the Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or
    -      agreed to in writing, Licensor provides the Work (and each
    -      Contributor provides its Contributions) on an "AS IS" BASIS,
    -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -      implied, including, without limitation, any warranties or conditions
    -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -      PARTICULAR PURPOSE. You are solely responsible for determining the
    -      appropriateness of using or redistributing the Work and assume any
    -      risks associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory,
    -      whether in tort (including negligence), contract, or otherwise,
    -      unless required by applicable law (such as deliberate and grossly
    -      negligent acts) or agreed to in writing, shall any Contributor be
    -      liable to You for damages, including any direct, indirect, special,
    -      incidental, or consequential damages of any character arising as a
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    -
    -   END OF TERMS AND CONDITIONS
    -
    -   APPENDIX: How to apply the Apache License to your work.
    -
    -      To apply the Apache License to your work, attach the following
    -      boilerplate notice, with the fields enclosed by brackets "{}"
    -      replaced with your own identifying information. (Don't include
    -      the brackets!)  The text should be enclosed in the appropriate
    -      comment syntax for the file format. We also recommend that a
    -      file or class name and description of purpose be included on the
    -      same "printed page" as the copyright notice for easier
    -      identification within third-party archives.
    -
    -   Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
    -
    -   Licensed under the Apache License, Version 2.0 (the "License");
    -   you may not use this file except in compliance with the License.
    -   You may obtain a copy of the License at
    -
    -       http://www.apache.org/licenses/LICENSE-2.0
    -
    -   Unless required by applicable law or agreed to in writing, software
    -   distributed under the License is distributed on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -   See the License for the specific language governing permissions and
    -   limitations under the License.
    -
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
                                   Apache License
    -                         Version 2.0, January 2004
    -                      http://www.apache.org/licenses/
    -
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -1. Definitions.
    -
    -  "License" shall mean the terms and conditions for use, reproduction,
    -  and distribution as defined by Sections 1 through 9 of this document.
    -
    -  "Licensor" shall mean the copyright owner or entity authorized by
    -  the copyright owner that is granting the License.
    +  "Licensor" shall mean the copyright owner or entity authorized by
    +  the copyright owner that is granting the License.
     
       "Legal Entity" shall mean the union of the acting entity and all
       other entities that control, are controlled by, or are under common
    @@ -5482,7 +5295,6 @@ 

    Used by:

  • utf-8
  • utf8parse
  • wasm-streams
  • -
  • zerocopy
  •                               Apache License
                             Version 2.0, January 2004
    @@ -8434,7 +8246,6 @@ 

    Used by:

  • derive_arbitrary
  • displaydoc
  • either
  • -
  • envmnt
  • equivalent
  • error-chain
  • event-listener
  • @@ -8446,7 +8257,6 @@

    Used by:

  • fnv
  • form_urlencoded
  • fraction
  • -
  • fsio
  • futures-lite
  • futures-timer
  • gimli
  • @@ -10242,833 +10052,833 @@

    Used by:

  • allocator-api2
  • thin-vec
  • -
                                  Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    +                
                                  Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
                                  Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +   To apply the Apache License to your work, attach the following
    +   boilerplate notice, with the fields enclosed by brackets "[]"
    +   replaced with your own identifying information. (Don't include
    +   the brackets!)  The text should be enclosed in the appropriate
    +   comment syntax for the file format. We also recommend that a
    +   file or class name and description of purpose be included on the
    +   same "printed page" as the copyright notice for easier
    +   identification within third-party archives.
    +
    +Copyright [yyyy] [name of copyright owner]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
                                  Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +   To apply the Apache License to your work, attach the following
    +   boilerplate notice, with the fields enclosed by brackets "[]"
    +   replaced with your own identifying information. (Don't include
    +   the brackets!)  The text should be enclosed in the appropriate
    +   comment syntax for the file format. We also recommend that a
    +   file or class name and description of purpose be included on the
    +   same "printed page" as the copyright notice for easier
    +   identification within third-party archives.
    +
    +Copyright [yyyy] [name of copyright owner]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
                                  Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +   To apply the Apache License to your work, attach the following
    +   boilerplate notice, with the fields enclosed by brackets "[]"
    +   replaced with your own identifying information. (Don't include
    +   the brackets!)  The text should be enclosed in the appropriate
    +   comment syntax for the file format. We also recommend that a
    +   file or class name and description of purpose be included on the
    +   same "printed page" as the copyright notice for easier
    +   identification within third-party archives.
    +
    +Copyright [yyyy] [name of copyright owner]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
      Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
     
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
     
    -1. Definitions.
    +   1. Definitions.
     
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
     
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
                                  Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    -
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -1. Definitions.
    -
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    -
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -APPENDIX: How to apply the Apache License to your work.
    -
    -   To apply the Apache License to your work, attach the following
    -   boilerplate notice, with the fields enclosed by brackets "[]"
    -   replaced with your own identifying information. (Don't include
    -   the brackets!)  The text should be enclosed in the appropriate
    -   comment syntax for the file format. We also recommend that a
    -   file or class name and description of purpose be included on the
    -   same "printed page" as the copyright notice for easier
    -   identification within third-party archives.
    -
    -Copyright [yyyy] [name of copyright owner]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -	http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
                                  Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    -
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -1. Definitions.
    -
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    -
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -APPENDIX: How to apply the Apache License to your work.
    -
    -   To apply the Apache License to your work, attach the following
    -   boilerplate notice, with the fields enclosed by brackets "[]"
    -   replaced with your own identifying information. (Don't include
    -   the brackets!)  The text should be enclosed in the appropriate
    -   comment syntax for the file format. We also recommend that a
    -   file or class name and description of purpose be included on the
    -   same "printed page" as the copyright notice for easier
    -   identification within third-party archives.
    -
    -Copyright [yyyy] [name of copyright owner]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -	http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
                                  Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    -
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -1. Definitions.
    -
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    -
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -APPENDIX: How to apply the Apache License to your work.
    -
    -   To apply the Apache License to your work, attach the following
    -   boilerplate notice, with the fields enclosed by brackets "[]"
    -   replaced with your own identifying information. (Don't include
    -   the brackets!)  The text should be enclosed in the appropriate
    -   comment syntax for the file format. We also recommend that a
    -   file or class name and description of purpose be included on the
    -   same "printed page" as the copyright notice for easier
    -   identification within third-party archives.
    -
    -Copyright [yyyy] [name of copyright owner]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -	http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
      Apache License
    -                           Version 2.0, January 2004
    -                        http://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    -
    -      "License" shall mean the terms and conditions for use, reproduction,
    -      and distribution as defined by Sections 1 through 9 of this document.
    -
    -      "Licensor" shall mean the copyright owner or entity authorized by
    -      the copyright owner that is granting the License.
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
     
           "Legal Entity" shall mean the union of the acting entity and all
           other entities that control, are controlled by, or are under common
    @@ -11257,53 +11067,6 @@ 

    Used by:

    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    # Contributing
    -
    -## License
    -
    -Licensed under either of
    -
    - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
    - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
    -
    -at your option.
    -
    -### Contribution
    -
    -Unless you explicitly state otherwise, any contribution intentionally submitted
    -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
    -additional terms or conditions.
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    ../../LICENSE-APACHE
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    // Licensed under the Apache License, Version 2.0
    -// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
    -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
    -// All files in the project carrying such notice may not be copied, modified, or distributed
    -// except according to those terms.
     
  • @@ -11940,6 +11703,9 @@

    Used by:

    Apache License 2.0

    Used by:

      +
    • apollo-compiler
    • +
    • apollo-parser
    • +
    • apollo-smith
    • async-graphql-axum
    • async-graphql-derive
    • async-graphql-parser
    • @@ -11960,9 +11726,11 @@

      Used by:

    • num-cmp
    • prost
    • rhai_codegen
    • +
    • serde_derive_default
    • siphasher
    • system-configuration
    • system-configuration-sys
    • +
    • tagptr
    • thrift
    • try_match_inner
    • unic-char-property
    • @@ -12034,105 +11802,19 @@

      Used by:

      To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -
  • - -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    Copyright 2021 Oliver Giersch
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    Copyright [2023] [Bryn Cooke]
    +Copyright [yyyy] [name of copyright owner]
     
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at
     
    -    http://www.apache.org/licenses/LICENSE-2.0
    +http://www.apache.org/licenses/LICENSE-2.0
     
     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     See the License for the specific language governing permissions and
     limitations under the License.
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    Licensed under the Apache License, Version 2.0
    -<LICENSE-APACHE or
    -http://www.apache.org/licenses/LICENSE-2.0> or the MIT
    -license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
    -at your option. All files in the project carrying such
    -notice may not be copied, modified, or distributed except
    -according to those terms.
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    MIT OR Apache-2.0
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    MIT OR Apache-2.0
    -
    -
  • -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    MIT or Apache-2.0
     
  • @@ -12206,7 +11888,7 @@

    Used by:

  • Inflector
  • str_inflector
  • -
    Copyright (c) <year> <owner>
    +                
    Copyright (c) <year> <owner> 
     
     Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
     
    @@ -12351,7 +12033,7 @@ 

    Used by:

    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  • @@ -12360,33 +12042,33 @@

    Used by:

    -
    Copyright (c) 2019, Sébastien Crozet
    -All rights reserved.
    -
    -Redistribution and use in source and binary forms, with or without
    -modification, are permitted provided that the following conditions are met:
    -
    -1. Redistributions of source code must retain the above copyright notice, this
    -   list of conditions and the following disclaimer.
    -
    -2. Redistributions in binary form must reproduce the above copyright notice,
    -   this list of conditions and the following disclaimer in the documentation
    -   and/or other materials provided with the distribution.
    -
    -3. Neither the name of the author nor the names of its contributors may be used
    -   to endorse or promote products derived from this software without specific
    -   prior written permission.
    -
    -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
    -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    +                
    Copyright (c) 2019, Sébastien Crozet
    +All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are met:
    +
    +1. Redistributions of source code must retain the above copyright notice, this
    +   list of conditions and the following disclaimer.
    +
    +2. Redistributions in binary form must reproduce the above copyright notice,
    +   this list of conditions and the following disclaimer in the documentation
    +   and/or other materials provided with the distribution.
    +
    +3. Neither the name of the author nor the names of its contributors may be used
    +   to endorse or promote products derived from this software without specific
    +   prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
    +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     
  • @@ -12395,7 +12077,7 @@

    Used by:

    -
    Copyright (c) <year> <owner>.
    +                
    Copyright (c) <year> <owner>. 
     
     Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
     
    @@ -12448,46 +12130,46 @@ 

    Used by:

    -
    Creative Commons CC0 1.0 Universal
    -
    -<<beginOptional;name=ccOptionalIntro>> CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER.  <<endOptional>>
    -
    -Statement of Purpose
    -
    -The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
    -
    -Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
    -
    -For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
    -
    -1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
    -
    -     i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
    -
    -     ii. moral rights retained by the original author(s) and/or performer(s);
    -
    -     iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
    -
    -     iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
    -
    -     v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
    -
    -     vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
    -
    -     vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
    -
    -2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
    -
    -3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
    -
    -4. Limitations and Disclaimers.
    -
    -     a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
    -
    -     b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
    -
    -     c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
    -
    +                
    Creative Commons CC0 1.0 Universal
    +
    +<<beginOptional;name=ccOptionalIntro>> CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER.  <<endOptional>>
    +
    +Statement of Purpose
    +
    +The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
    +
    +Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
    +
    +For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
    +
    +1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
    +
    +     i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
    +
    +     ii. moral rights retained by the original author(s) and/or performer(s);
    +
    +     iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
    +
    +     iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
    +
    +     v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
    +
    +     vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
    +
    +     vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
    +
    +2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
    +
    +3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
    +
    +4. Limitations and Disclaimers.
    +
    +     a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
    +
    +     b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
    +
    +     c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
    +
          d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. 
  • @@ -12726,13 +12408,6 @@

    Elastic License 2.0

    Used by:

    Copyright 2021 Apollo Graph, Inc.
     
    @@ -12997,6 +12672,36 @@ 

    Used by:

    // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +
    +
  • +
  • +

    ISC License

    +

    Used by:

    + +
    // Copyright 2021 Brian Smith.
    +//
    +// Permission to use, copy, modify, and/or distribute this software for any
    +// purpose with or without fee is hereby granted, provided that the above
    +// copyright notice and this permission notice appear in all copies.
    +//
    +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
    +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR
    +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    +
    +#[test]
    +fn cert_without_extensions_test() {
    +    // Check the certificate is valid with
    +    // `openssl x509 -in cert_without_extensions.der -inform DER -text -noout`
    +    const CERT_WITHOUT_EXTENSIONS_DER: &[u8] = include_bytes!("cert_without_extensions.der");
    +
    +    assert!(webpki::EndEntityCert::try_from(CERT_WITHOUT_EXTENSIONS_DER).is_ok());
    +}
     
  • @@ -13066,7 +12771,6 @@

    ISC License

    Used by:

    ISC License:
     
    @@ -14682,7 +14386,7 @@ 

    Used by:

    MIT License
     
    -Copyright (c) 2021-2022 Joshua Barretto
    +Copyright (c) 2021-2022 Joshua Barretto 
     
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to deal
    @@ -14946,26 +14650,26 @@ 

    Used by:

    -
    MIT License
    -
    -Copyright (c) 2019 Daniel Augusto Rizzi Salvadori
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +                
    MIT License
    +
    +Copyright (c) 2019 Daniel Augusto Rizzi Salvadori
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     SOFTWARE.
  • @@ -15166,37 +14870,6 @@

    Used by:

    MIT License

    Used by:

    -
    The MIT License (MIT)
    -
    -Copyright (c) 2014 Mathijs van de Nes
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -
  • -
  • -

    MIT License

    -

    Used by:

    - @@ -15670,28 +15343,28 @@

    Used by:

    -
    The MIT License (MIT)
    -
    -Copyright (c) 2015 Austin Bonander
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    +                
    The MIT License (MIT)
    +
    +Copyright (c) 2015 Austin Bonander
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    +
     
  • @@ -15700,26 +15373,26 @@

    Used by:

    -
    The MIT License (MIT)
    -
    -Copyright (c) 2015 Bartłomiej Kamiński
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +                
    The MIT License (MIT)
    +
    +Copyright (c) 2015 Bartłomiej Kamiński
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     SOFTWARE.
  • @@ -15728,45 +15401,28 @@

    Used by:

    -
    The MIT License (MIT)
    -
    -Copyright (c) 2015 Markus Westerlind
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in
    -all copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    -THE SOFTWARE.
    -
    -
    -
  • -
  • -

    MIT License

    -

    Used by:

    - -
    This project is dual-licensed under the Unlicense and MIT licenses.
    -
    -You may use this code under the terms of either license.
    +                
    The MIT License (MIT)
    +
    +Copyright (c) 2015 Markus Westerlind
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in
    +all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    +THE SOFTWARE.
    +
     
  • @@ -16204,7 +15860,7 @@

    Used by:

    means any form of the work other than Source Code Form. 1.7. "Larger Work" - means a work that combines Covered Software with other material, in + means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" @@ -16548,6 +16204,7 @@

    Mozilla Public License 2.0

    Used by:

    Mozilla Public License Version 2.0
     ==================================
    @@ -16586,7 +16243,7 @@ 

    Used by:

    means any form of the work other than Source Code Form. 1.7. "Larger Work" - means a work that combines Covered Software with other material, in + means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" @@ -16922,35 +16579,6 @@

    Used by:

    This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. -
    -
  • -
  • -

    Mozilla Public License 2.0

    -

    Used by:

    - -
    This packge contains a modified version of ca-bundle.crt:
    -
    -ca-bundle.crt -- Bundle of CA Root Certificates
    -
    -Certificate data from Mozilla as of: Thu Nov  3 19:04:19 2011#
    -This is a bundle of X.509 certificates of public Certificate Authorities
    -(CA). These were automatically extracted from Mozilla's root certificates
    -file (certdata.txt).  This file can be found in the mozilla source tree:
    -http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1#
    -It contains the certificates in PEM format and therefore
    -can be directly used with curl / libcurl / php_curl, or with
    -an Apache+mod_ssl webserver for SSL client authentication.
    -Just configure this file as the SSLCACertificateFile.#
    -
    -***** BEGIN LICENSE BLOCK *****
    -This Source Code Form is subject to the terms of the Mozilla Public License,
    -v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
    -one at http://mozilla.org/MPL/2.0/.
    -
    -***** END LICENSE BLOCK *****
    -@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
     
  • @@ -16967,7 +16595,7 @@

    Used by:

    * are met: * * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. + * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in @@ -17021,26 +16649,50 @@

    Used by:

    UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
     
    -Unicode Data Files include all data files under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    -
    -Unicode Data Files do not include PDF online code charts under the directory http://www.unicode.org/Public/.
    -
    -Software includes any source code published in the Unicode Standard or under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    +See Terms of Use <https://www.unicode.org/copyright.html>
    +for definitions of Unicode Inc.’s Data Files and Software.
     
    -NOTICE TO USER: Carefully read the following legal agreement. BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.
    +NOTICE TO USER: Carefully read the following legal agreement.
    +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
    +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
    +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
    +TERMS AND CONDITIONS OF THIS AGREEMENT.
    +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
    +THE DATA FILES OR SOFTWARE.
     
     COPYRIGHT AND PERMISSION NOTICE
     
    -Copyright © 1991-2016 Unicode, Inc. All rights reserved. Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
    +Copyright © 1991-2022 Unicode, Inc. All rights reserved.
    +Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
     
    -Permission is hereby granted, free of charge, to any person obtaining a copy of the Unicode data files and any associated documentation (the "Data Files") or Unicode software and any associated documentation (the "Software") to deal in the Data Files or Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Data Files or Software, and to permit persons to whom the Data Files or Software are furnished to do so, provided that either
    -
    -     (a) this copyright and permission notice appear with all copies of the Data Files or Software, or
    -     (b) this copyright and permission notice appear in associated Documentation.
    -
    -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    -
    -Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in these Data Files or Software without prior written authorization of the copyright holder.
    +Permission is hereby granted, free of charge, to any person obtaining
    +a copy of the Unicode data files and any associated documentation
    +(the "Data Files") or Unicode software and any associated documentation
    +(the "Software") to deal in the Data Files or Software
    +without restriction, including without limitation the rights to use,
    +copy, modify, merge, publish, distribute, and/or sell copies of
    +the Data Files or Software, and to permit persons to whom the Data Files
    +or Software are furnished to do so, provided that either
    +(a) this copyright and permission notice appear with all copies
    +of the Data Files or Software, or
    +(b) this copyright and permission notice appear in associated
    +Documentation.
    +
    +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
    +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
    +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    +NONINFRINGEMENT OF THIRD PARTY RIGHTS.
    +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
    +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
    +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
    +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
    +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
    +PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    +
    +Except as contained in this notice, the name of a copyright holder
    +shall not be used in advertising or otherwise to promote the sale,
    +use or other dealings in these Data Files or Software without prior
    +written authorization of the copyright holder.
     
  • diff --git a/scripts/install.sh b/scripts/install.sh index df887e5d44..ebb6b69b6b 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.53.0-rc.0" +PACKAGE_VERSION="v1.53.0-rc.1" download_binary() { downloader --check From 363ec539488650ec7edfa37b6ed1ffe9b27c1a2d Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Wed, 28 Aug 2024 09:51:03 +0300 Subject: [PATCH 102/108] CircleCI Brownouts on 20.10.11 image started today. Deprecations in Sept. Trying the latest 24.x Ref: https://discuss.circleci.com/t/remote-docker-image-deprecations-and-eol-for-2024/50176 Ref: https://app.circleci.com/pipelines/github/apollographql/router/25458/workflows/4914af34-5bd0-4514-ae3d-972f6fae61f0/jobs/182686/steps --- .circleci/config.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 67f8183606..5fa0a0f608 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -734,7 +734,9 @@ jobs: value: << pipeline.project.git_url >> steps: - setup_remote_docker: - version: 20.10.11 + # CircleCI Image Policy + # https://circleci.com/docs/remote-docker-images-support-policy/ + version: 24.0.9 docker_layer_caching: true - run: name: Docker build @@ -872,7 +874,9 @@ jobs: steps: - checkout - setup_remote_docker: - version: 20.10.11 + # CircleCI Image Policy + # https://circleci.com/docs/remote-docker-images-support-policy/ + version: 24.0.9 docker_layer_caching: true - attach_workspace: at: artifacts From d213ca5ebd7e895a9a658160077f0af6dc8a3ba6 Mon Sep 17 00:00:00 2001 From: Iryna Shestak Date: Wed, 28 Aug 2024 08:56:03 +0200 Subject: [PATCH 103/108] experimental query planner mode changelog Co-authored-by: Edward Huang --- .../feat_enabling_both_best_effort_query_planners.md | 
11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/.changesets/feat_enabling_both_best_effort_query_planners.md b/.changesets/feat_enabling_both_best_effort_query_planners.md index ee636378b0..9faf5e04e9 100644 --- a/.changesets/feat_enabling_both_best_effort_query_planners.md +++ b/.changesets/feat_enabling_both_best_effort_query_planners.md @@ -1,15 +1,12 @@ ### Enable native query planner to run in the background ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), [PR #5860](https://github.com/apollographql/router/pull/5860)) -The router now schedules background jobs to run the native query planner in -order to compare its results to the legacy implementation. This is one of the -ways to help us ascertain its correctness before making a decision to switch -entirely to the native planner. +The router now schedules background jobs to run the native (Rust) query planner to compare its results to the legacy implementation. This helps ascertain its correctness before making a decision to switch entirely to it from the legacy query planner. -The legacy query planner implementation continues to be used to plan and execute +The router continues to use the legacy query planner to plan and execute operations, so there is no effect on the hot path. 
-You can disable running background comparisons in the native query planner by -enabling just the `legacy` mode in router.yaml: +To disable running background comparisons with the native query planner, you can configure the router to enable only the `legacy` query planner: + ```yaml experimental_query_planner_mode: legacy ``` From 3604632f09160b7922180a5609f02a4c6706c9a4 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Wed, 28 Aug 2024 08:08:12 +0100 Subject: [PATCH 104/108] Update .changesets/fix_garypen_fix_sessions_and_handle_reporting.md Co-authored-by: Edward Huang --- .../fix_garypen_fix_sessions_and_handle_reporting.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md b/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md index cb77b5aa99..f9cd880578 100644 --- a/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md +++ b/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md @@ -1,9 +1,7 @@ ### Fix session counting and the reporting of file handle shortage ([PR #5834](https://github.com/apollographql/router/pull/5834)) -Session counting incorrectly included connections to the health check or other non-graphql connections. This is now corrected so that only connections to the main graphql port are counted. +The router previously gave incorrect warnings about file handle shortages due to session counting incorrectly including connections to health-check connections or other non-GraphQL connections. This is now corrected so that only connections to the main GraphQL port are counted, and file handle shortages are now handled correctly as a global resource. -Warnings about file handle shortages are now handled correctly as a global resource. - -The listening logic had its own custom rate limiting notifications. 
This has been removed and log notification is now controlled by the [standard router log rate limiting configuration](https://www.apollographql.com/docs/router/configuration/telemetry/exporters/logging/stdout/#rate_limit) +Also, the router's port listening logic had its own custom rate-limiting of log notifications. This has been removed and replaced by the [standard router log rate limiting configuration](https://www.apollographql.com/docs/router/configuration/telemetry/exporters/logging/stdout/#rate_limit) By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5834 From 8bf4370885c8bb63e970395d8bc33d8d73cc24e5 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Wed, 28 Aug 2024 11:01:47 +0300 Subject: [PATCH 105/108] Use Docker Engine v20.10.24 until after the release is out. Not going to try to solve the jump to v24 today. --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5fa0a0f608..dddd8828b2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -876,7 +876,7 @@ jobs: - setup_remote_docker: # CircleCI Image Policy # https://circleci.com/docs/remote-docker-images-support-policy/ - version: 24.0.9 + version: 20.10.24 docker_layer_caching: true - attach_workspace: at: artifacts From 43a1e57d93f506b03295c659043e30f9ee2abdd2 Mon Sep 17 00:00:00 2001 From: Iryna Shestak Date: Wed, 28 Aug 2024 12:59:13 +0200 Subject: [PATCH 106/108] docs: add noIndex:true docs for experimental query planning mode (#5904) Co-authored-by: Edward Huang Co-authored-by: Chandrika Srinivasan --- .../experimental_query_planning_mode.mdx | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 docs/source/configuration/experimental_query_planning_mode.mdx diff --git a/docs/source/configuration/experimental_query_planning_mode.mdx b/docs/source/configuration/experimental_query_planning_mode.mdx new file mode 100644 index 
0000000000..bbbf79ec24 --- /dev/null +++ b/docs/source/configuration/experimental_query_planning_mode.mdx @@ -0,0 +1,34 @@ +--- +title: Experimental Query Planning Mode +subtitle: Switch between legacy and native query planning +noIndex: true +--- + +The router (GraphOS Router and Apollo Router Core) is in the early stages of +transitioning to a native query planner, replacing the existing legacy planner. + +As part of the efforts to ensure correctness and stability of the new planner, +v1.53.0 of the router enables both planners to run in parallel in order to +compare them. After the comparison, the router discards the native planner's results and +uses only the legacy planner to execute requests. + +The native planner uses a single thread in the cold path of the router. It has a +bounded queue of 10 queries. If the queue is full, the router simply does not run the +comparison to avoid excessive resource consumption. + +You can disable the native query planner by configuring your `router.yaml` to use just +`legacy` planning. You may want to disable it to avoid spikes in CPU utilization, for +example if an erroneous operation fails to complete planning in the native planner's +background thread. + +```yaml title="router.yaml" +experimental_query_planner_mode: legacy +``` + +The supported modes of `experimental_query_planner_mode` are the following: +* `new`. Enables only the native query planner. +* `both_best_effort` - default. Enables comparison between legacy and new native + query planners. The legacy query planner is used for execution. If any + unsupported features are detected, the router falls back to legacy with an + `info` log. +* `legacy`. Enables only the legacy query planner. 
From c583d6a913f1959e8c1a1d9f282cc43d0261cfe9 Mon Sep 17 00:00:00 2001 From: Iryna Shestak Date: Wed, 28 Aug 2024 14:40:56 +0200 Subject: [PATCH 107/108] docs: rename experimental query planner mode title (#5906) --- ...ry_planning_mode.mdx => experimental_query_planner_mode.mdx} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename docs/source/configuration/{experimental_query_planning_mode.mdx => experimental_query_planner_mode.mdx} (97%) diff --git a/docs/source/configuration/experimental_query_planning_mode.mdx b/docs/source/configuration/experimental_query_planner_mode.mdx similarity index 97% rename from docs/source/configuration/experimental_query_planning_mode.mdx rename to docs/source/configuration/experimental_query_planner_mode.mdx index bbbf79ec24..67e1a2556d 100644 --- a/docs/source/configuration/experimental_query_planning_mode.mdx +++ b/docs/source/configuration/experimental_query_planner_mode.mdx @@ -1,5 +1,5 @@ --- -title: Experimental Query Planning Mode +title: Experimental Query Planner Mode subtitle: Switch between legacy and native query planning noIndex: true --- From 9cad872e912acbcd87bcfe95282e0fda2125e76d Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Wed, 28 Aug 2024 15:48:41 +0300 Subject: [PATCH 108/108] prep release: v1.53.0 (#5905) Co-authored-by: Iryna Shestak --- .../config_geal_raise_redis_timeouts.md | 5 - .../docs_tninesling_cost_docs_update.md | 5 - .changesets/feat_bnjjj_feat_417.md | 26 - .../feat_candle_exhale_deodorant_weeds.md | 28 - ...nabling_both_best_effort_query_planners.md | 14 - .changesets/feat_enhanced_observability.md | 13 - .changesets/feat_geal_v8_heap_statistics.md | 7 - .changesets/feat_helm_rollingupdate.md | 5 - .changesets/feat_propagation_format.md | 22 - .../feat_tarmac_peanuts_brandy_reverse.md | 7 - .../feat_tninesling_cost_directives.md | 30 - .../feat_tninesling_make_demand_control_ga.md | 9 - .changesets/feat_update_federation.md | 8 - .changesets/fix_bnjjj_fix_5702.md | 21 - 
...bnjjj_fix_fatal_error_subgraph_response.md | 8 - ...x_bnjjj_fix_subgraph_selector_for_event.md | 23 - ...fix_bnjjj_fix_supergraph_query_selector.md | 22 - .../fix_bnjjj_improve_gt_lt_conditions.md | 23 - .changesets/fix_bryn_remote_spans.md | 16 - .changesets/fix_bryn_revert_5703.md | 5 - .changesets/fix_customer_snore_infant_wrap.md | 5 - ...rypen_fix_sessions_and_handle_reporting.md | 7 - .changesets/fix_geal_subgraph_error_path.md | 5 - .changesets/fix_missing_cache_gauge.md | 5 - .../fix_renee_consistent_uplink_type.md | 5 - .changesets/fix_renee_operation_variables.md | 13 - .../fix_tninesling_cost_result_filtering.md | 41 -- ...inesling_demand_control_score_arguments.md | 5 - ...int_bnjjj_improve_perf_custom_telemetry.md | 5 - CHANGELOG.md | 420 +++++++++++++ Cargo.lock | 8 +- apollo-federation/Cargo.toml | 2 +- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.template.toml | 2 +- .../templates/base/xtask/Cargo.template.toml | 2 +- apollo-router/Cargo.toml | 4 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 +- helm/chart/router/README.md | 6 +- licenses.html | 584 +++++++++++++++--- scripts/install.sh | 2 +- 44 files changed, 943 insertions(+), 489 deletions(-) delete mode 100644 .changesets/config_geal_raise_redis_timeouts.md delete mode 100644 .changesets/docs_tninesling_cost_docs_update.md delete mode 100644 .changesets/feat_bnjjj_feat_417.md delete mode 100644 .changesets/feat_candle_exhale_deodorant_weeds.md delete mode 100644 .changesets/feat_enabling_both_best_effort_query_planners.md delete mode 100644 .changesets/feat_enhanced_observability.md delete mode 100644 .changesets/feat_geal_v8_heap_statistics.md delete mode 100644 .changesets/feat_helm_rollingupdate.md delete mode 100644 .changesets/feat_propagation_format.md delete mode 100644 
.changesets/feat_tarmac_peanuts_brandy_reverse.md delete mode 100644 .changesets/feat_tninesling_cost_directives.md delete mode 100644 .changesets/feat_tninesling_make_demand_control_ga.md delete mode 100644 .changesets/feat_update_federation.md delete mode 100644 .changesets/fix_bnjjj_fix_5702.md delete mode 100644 .changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md delete mode 100644 .changesets/fix_bnjjj_fix_subgraph_selector_for_event.md delete mode 100644 .changesets/fix_bnjjj_fix_supergraph_query_selector.md delete mode 100644 .changesets/fix_bnjjj_improve_gt_lt_conditions.md delete mode 100644 .changesets/fix_bryn_remote_spans.md delete mode 100644 .changesets/fix_bryn_revert_5703.md delete mode 100644 .changesets/fix_customer_snore_infant_wrap.md delete mode 100644 .changesets/fix_garypen_fix_sessions_and_handle_reporting.md delete mode 100644 .changesets/fix_geal_subgraph_error_path.md delete mode 100644 .changesets/fix_missing_cache_gauge.md delete mode 100644 .changesets/fix_renee_consistent_uplink_type.md delete mode 100644 .changesets/fix_renee_operation_variables.md delete mode 100644 .changesets/fix_tninesling_cost_result_filtering.md delete mode 100644 .changesets/fix_tninesling_demand_control_score_arguments.md delete mode 100644 .changesets/maint_bnjjj_improve_perf_custom_telemetry.md diff --git a/.changesets/config_geal_raise_redis_timeouts.md b/.changesets/config_geal_raise_redis_timeouts.md deleted file mode 100644 index 3dec9af696..0000000000 --- a/.changesets/config_geal_raise_redis_timeouts.md +++ /dev/null @@ -1,5 +0,0 @@ -### Increase default Redis timeout ([PR #5795](https://github.com/apollographql/router/pull/5795)) - -The default Redis command timeout was increased from 2ms to 500ms to accommodate common production use cases. 
- -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5795 \ No newline at end of file diff --git a/.changesets/docs_tninesling_cost_docs_update.md b/.changesets/docs_tninesling_cost_docs_update.md deleted file mode 100644 index 7311bdfa38..0000000000 --- a/.changesets/docs_tninesling_cost_docs_update.md +++ /dev/null @@ -1,5 +0,0 @@ -### Add sections on using @cost and @listSize to demand control docs ([PR #5839](https://github.com/apollographql/router/pull/5839)) - -Updates the demand control documentation to include details on `@cost` and `@listSize` for more accurate cost estimation. - -By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5839 diff --git a/.changesets/feat_bnjjj_feat_417.md b/.changesets/feat_bnjjj_feat_417.md deleted file mode 100644 index 7c4dc0aba5..0000000000 --- a/.changesets/feat_bnjjj_feat_417.md +++ /dev/null @@ -1,26 +0,0 @@ -### Support new telemetry trace ID format ([PR #5735](https://github.com/apollographql/router/pull/5735)) - -The router supports a new UUID format for telemetry trace IDs. 
- - -The following formats are supported in router configuration for trace IDs: - -* `open_telemetry` -* `hexadecimal` (same as `opentelemetry`) -* `decimal` -* `datadog` -* `uuid` (may contain dashes) - -You can configure router logging to display the formatted trace ID with `display_trace_id`: - -```yaml - telemetry: - exporters: - logging: - stdout: - format: - json: - display_trace_id: (true|false|open_telemetry|hexadecimal|decimal|datadog|uuid) -``` - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5735 \ No newline at end of file diff --git a/.changesets/feat_candle_exhale_deodorant_weeds.md b/.changesets/feat_candle_exhale_deodorant_weeds.md deleted file mode 100644 index 00b59a4e3f..0000000000 --- a/.changesets/feat_candle_exhale_deodorant_weeds.md +++ /dev/null @@ -1,28 +0,0 @@ -### Add warnings for invalid configuration of custom telemetry ([PR #5759](https://github.com/apollographql/router/issues/5759)) - -The router now logs warnings when running with telemetry that may have invalid custom configurations. - - -For example, you may customize telemetry using invalid conditions or inaccessible statuses: - -```yaml -telemetry: - instrumentation: - events: - subgraph: - my.event: - message: "Auditing Router Event" - level: info - on: request - attributes: - subgraph.response.status: code - # Warning: should use selector for subgraph_name: true instead of comparing strings of subgraph_name and product - condition: - eq: - - subgraph_name - - product -``` - -Although the configuration is syntactically correct, its customization is invalid, and the router now outputs warnings for such invalid configurations. 
- -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759 \ No newline at end of file diff --git a/.changesets/feat_enabling_both_best_effort_query_planners.md b/.changesets/feat_enabling_both_best_effort_query_planners.md deleted file mode 100644 index 9faf5e04e9..0000000000 --- a/.changesets/feat_enabling_both_best_effort_query_planners.md +++ /dev/null @@ -1,14 +0,0 @@ -### Enable native query planner to run in the background ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), [PR #5860](https://github.com/apollographql/router/pull/5860)) - -The router now schedules background jobs to run the native (Rust) query planner to compare its results to the legacy implementation. This helps ascertain its correctness before making a decision to switch entirely to it from the legacy query planner. - -The router continues to use the legacy query planner to plan and execute -operations, so there is no effect on the hot path. 
- -To disable running background comparisons with the native query planner, you can configure the router to enable only the `legacy` query planner: - -```yaml -experimental_query_planner_mode: legacy -``` - -By [SimonSapin](https://github.com/SimonSapin) in ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771) [PR #5860](https://github.com/apollographql/router/pull/5860)) \ No newline at end of file diff --git a/.changesets/feat_enhanced_observability.md b/.changesets/feat_enhanced_observability.md deleted file mode 100644 index 703e0a6918..0000000000 --- a/.changesets/feat_enhanced_observability.md +++ /dev/null @@ -1,13 +0,0 @@ -### New `apollo.router.cache.storage.estimated_size` gauge ([PR #5770](https://github.com/apollographql/router/pull/5770)) - -The router supports the new metric `apollo.router.cache.storage.estimated_size` that helps users understand and monitor the amount of memory that query planner cache entries consume. - -The `apollo.router.cache.storage.estimated_size` metric gives an estimated size in bytes of a cache entry. It has the following attributes: -- `kind`: `query planner`. -- `storage`: `memory`. - -Before using the estimate to decide whether to update the cache, users should validate that the estimate correlates with their pod's memory usage. - -To learn how to troubleshoot with this metric, see the [Pods terminating due to memory pressure](https://www.apollographql.com/docs/router/containerization/kubernetes#pods-terminating-due-to-memory-pressure) guide in docs. 
- -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5770 \ No newline at end of file diff --git a/.changesets/feat_geal_v8_heap_statistics.md b/.changesets/feat_geal_v8_heap_statistics.md deleted file mode 100644 index c091b108a8..0000000000 --- a/.changesets/feat_geal_v8_heap_statistics.md +++ /dev/null @@ -1,7 +0,0 @@ -### Add V8 heap usage metrics ([PR #5781](https://github.com/apollographql/router/pull/5781)) - -The router supports new gauge metrics for tracking heap memory usage of the V8 Javascript engine: -- `apollo.router.v8.heap.used`: heap memory used by V8, in bytes -- `apollo.router.v8.heap.total`: total heap allocated by V8, in bytes - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5781 \ No newline at end of file diff --git a/.changesets/feat_helm_rollingupdate.md b/.changesets/feat_helm_rollingupdate.md deleted file mode 100644 index 84094eb0a9..0000000000 --- a/.changesets/feat_helm_rollingupdate.md +++ /dev/null @@ -1,5 +0,0 @@ -### Helm: Support `maxSurge` and `maxUnavailable` for rolling updates ([Issue #5664](https://github.com/apollographql/router/issues/5664)) - -The router Helm chart now supports the configuration of `maxSurge` and `maxUnavailable` for the `RollingUpdate` deployment strategy. - -By [Jon Christiansen](https://github.com/theJC) in https://github.com/apollographql/router/pull/5665 diff --git a/.changesets/feat_propagation_format.md b/.changesets/feat_propagation_format.md deleted file mode 100644 index 9aaad0efb2..0000000000 --- a/.changesets/feat_propagation_format.md +++ /dev/null @@ -1,22 +0,0 @@ -### Add `format` for trace ID propagation. ([PR #5803](https://github.com/apollographql/router/pull/5803)) - -The router now supports specifying the format of trace IDs that are propagated to subgraphs via headers. 
- -You can configure the format with the `format` option: - -```yaml -telemetry: - exporters: - tracing: - propagation: - request: - header_name: "my_header" - # Must be in UUID form, with or without dashes - format: uuid -``` - -Note that incoming requests must be some form of UUID, either with or without dashes. - -To learn about supported formats, go to [`request` configuration reference](https://apollographql.com/docs/router/configuration/telemetry/exporters/tracing/overview#request-configuration-reference) docs. - -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5803 diff --git a/.changesets/feat_tarmac_peanuts_brandy_reverse.md b/.changesets/feat_tarmac_peanuts_brandy_reverse.md deleted file mode 100644 index 1b87851a52..0000000000 --- a/.changesets/feat_tarmac_peanuts_brandy_reverse.md +++ /dev/null @@ -1,7 +0,0 @@ -### REMINDER: ADD CHANGELOG NOTICE FOR CACHE BUMP FOR FEDERATION UPGRADE - -THIS CHANGELOG SHOULD NOT BE IN THE FINAL RELEASE. THIS IS A REMINDER TO PUT THE BANNER AT THE TOP OF THE CHANGELOG DURING FINALIZATION. - -IF YOU ARE REVIEWING THIS CHANGESET, CALL THIS OUT! :) - -By [@abernix](https://github.com/abernix) \ No newline at end of file diff --git a/.changesets/feat_tninesling_cost_directives.md b/.changesets/feat_tninesling_cost_directives.md deleted file mode 100644 index 07b05cbdf6..0000000000 --- a/.changesets/feat_tninesling_cost_directives.md +++ /dev/null @@ -1,30 +0,0 @@ -### Support demand control directives ([PR #5777](https://github.com/apollographql/router/pull/5777)) - -The router supports two new demand control directives, `@cost` and `@listSize`, that you can use to provide more accurate estimates of GraphQL operation costs to the router's demand control plugin. - -Use the `@cost` directive to customize the weights of operation cost calculations, particularly for expensive resolvers. - -```graphql -type Product { - id: ID! 
- name: String - expensiveField: Int @cost(weight: 20) -} -``` - -Use the `@listSize` directive to provide a more accurate estimate for the size of a specific list field, particularly for those that differ greatly from the global list size estimate. - -```graphql -type Magazine { - # This is assumed to always return 5 items - headlines: [Article] @listSize(assumedSize: 5) - - # This is estimated to return as many items as are requested by the parameter named "first" - getPage(first: Int!, after: ID!): [Article] - @listSize(slicingArguments: ["first"]) -} -``` - -To learn more, go to [Demand Control](https://www.apollographql.com/docs/router/executing-operations/demand-control/) docs. - -By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5777 diff --git a/.changesets/feat_tninesling_make_demand_control_ga.md b/.changesets/feat_tninesling_make_demand_control_ga.md deleted file mode 100644 index 8f99b75478..0000000000 --- a/.changesets/feat_tninesling_make_demand_control_ga.md +++ /dev/null @@ -1,9 +0,0 @@ -### General Availability (GA) of Demand Control ([PR #5868](https://github.com/apollographql/router/pull/5868)) - -Demand control in the router is now a generally available (GA) feature. - -**GA compatibility update**: if you used demand control during its preview, to use it in GA you must update your configuration from `preview_demand_control` to `demand_control`. - -To learn more, go to [Demand Control](https://www.apollographql.com/docs/router/executing-operations/demand-control/) docs. 
- -By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5868 diff --git a/.changesets/feat_update_federation.md b/.changesets/feat_update_federation.md deleted file mode 100644 index b3c0670daa..0000000000 --- a/.changesets/feat_update_federation.md +++ /dev/null @@ -1,8 +0,0 @@ -### Update federation to 2.8.3 ([PR #5781](https://github.com/apollographql/router/pull/5781)) - -> [!IMPORTANT] -> If you have enabled [Distributed query plan caching](https://www.apollographql.com/docs/router/configuration/distributed-caching/#distributed-query-plan-caching), this release changes the hashing algorithm used for the cache keys. On account of this, you should anticipate additional cache regeneration cost when updating between these versions while the new hashing algorithm comes into service. - -This updates the router from federation version 2.8.1 to 2.8.3, with a [fix for fragment generation](https://github.com/apollographql/federation/pull/3043). - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5781 diff --git a/.changesets/fix_bnjjj_fix_5702.md b/.changesets/fix_bnjjj_fix_5702.md deleted file mode 100644 index d364b78f27..0000000000 --- a/.changesets/fix_bnjjj_fix_5702.md +++ /dev/null @@ -1,21 +0,0 @@ -### Fix `exists` condition for custom telemetry events ([Issue #5702](https://github.com/apollographql/router/issues/5702)) - -The router now properly handles the `exists` condition for events. 
The following configuration now works as intended: - -```yaml -telemetry: - instrumentation: - events: - supergraph: - my.event: - message: "Auditing Router Event" - level: info - on: request - attributes: - graphql.operation.name: true - condition: - exists: - operation_name: string -``` - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759 \ No newline at end of file diff --git a/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md b/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md deleted file mode 100644 index b36b9ab32b..0000000000 --- a/.changesets/fix_bnjjj_fix_fatal_error_subgraph_response.md +++ /dev/null @@ -1,8 +0,0 @@ -### fix(subgraph_service): when the subgraph connection is closed or in error, return a proper subgraph response ([PR #5859](https://github.com/apollographql/router/pull/5859)) - - -The router now returns a proper subgraph response, with an error if necessary, when a subgraph connection is closed or returns an error. - -Previously, this issue prevented the subgraph response service from being triggered in coprocessors or Rhai scripts. - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5859 \ No newline at end of file diff --git a/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md b/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md deleted file mode 100644 index 9894b91622..0000000000 --- a/.changesets/fix_bnjjj_fix_subgraph_selector_for_event.md +++ /dev/null @@ -1,23 +0,0 @@ -### Evaluate selectors in response stage when possible ([PR #5725](https://github.com/apollographql/router/pull/5725)) - - -The router now supports having various supergraph selectors on response events. - -Because `events` are triggered at a specific event (`request`|`response`|`error`), you usually have only one condition for a related event. You can however have selectors that can be applied to several events, like `subgraph_name` to get the subgraph name). 
- -Example of an event to log the raw subgraph response only on a subgraph named `products`, this was not working before. - -```yaml -telemetry: - instrumentation: - events: - subgraph: - response: - level: info - condition: - eq: - - subgraph_name: true - - "products" -``` - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5725 \ No newline at end of file diff --git a/.changesets/fix_bnjjj_fix_supergraph_query_selector.md b/.changesets/fix_bnjjj_fix_supergraph_query_selector.md deleted file mode 100644 index 4546589f00..0000000000 --- a/.changesets/fix_bnjjj_fix_supergraph_query_selector.md +++ /dev/null @@ -1,22 +0,0 @@ -### Support supergraph query selector for events ([PR #5764](https://github.com/apollographql/router/pull/5764)) - -The router now supports the `query: root_fields` selector for `event_response`. Previously the selector worked for `response` stage events but didn't work for `event_response`. - -The following configuration for a `query: root_fields` on an `event_response` now works: - -```yaml -telemetry: - instrumentation: - events: - supergraph: - OPERATION_LIMIT_INFO: - message: operation limit info - on: event_response - level: info - attributes: - graphql.operation.name: true - query.root_fields: - query: root_fields -``` - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5764 \ No newline at end of file diff --git a/.changesets/fix_bnjjj_improve_gt_lt_conditions.md b/.changesets/fix_bnjjj_improve_gt_lt_conditions.md deleted file mode 100644 index 8923ee459b..0000000000 --- a/.changesets/fix_bnjjj_improve_gt_lt_conditions.md +++ /dev/null @@ -1,23 +0,0 @@ -### Support `gt`/`lt` conditions for parsing string selectors to numbers ([PR #5758](https://github.com/apollographql/router/pull/5758)) - -The router now supports greater than (`gt`) and less than (`lt`) conditions for header selectors. 
- -The following example applies an attribute on a span if the `content-length` header is greater than 100: - -```yaml -telemetry: - instrumentation: - spans: - mode: spec_compliant - router: - attributes: - trace_id: true - payload_is_to_big: # Set this attribute to true if the value of content-length header is > than 100 - static: true - condition: - gt: - - request_header: "content-length" - - 100 -``` - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5758 \ No newline at end of file diff --git a/.changesets/fix_bryn_remote_spans.md b/.changesets/fix_bryn_remote_spans.md deleted file mode 100644 index 76eb80956d..0000000000 --- a/.changesets/fix_bryn_remote_spans.md +++ /dev/null @@ -1,16 +0,0 @@ -### Fix trace propagation via header ([PR #5802](https://github.com/apollographql/router/pull/5802)) - -The router now correctly propagates trace IDs when using the `propagation.request.header_name` configuration option. - -```yaml -telemetry: - exporters: - tracing: - propagation: - request: - header_name: "id_from_header" -``` - -Previously, trace IDs weren't transferred to the root span of the request, causing spans to be incorrectly attributed to new traces. - -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5802 diff --git a/.changesets/fix_bryn_revert_5703.md b/.changesets/fix_bryn_revert_5703.md deleted file mode 100644 index bc072c4a8b..0000000000 --- a/.changesets/fix_bryn_revert_5703.md +++ /dev/null @@ -1,5 +0,0 @@ -### Fix Datadog underreporting APM metrics ([PR #5780](https://github.com/apollographql/router/pull/5780)) - -The previous [PR #5703](https://github.com/apollographql/router/pull/5703) has been reverted in this release because it caused Datadog to underreport APM span metrics. 
- -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5780 diff --git a/.changesets/fix_customer_snore_infant_wrap.md b/.changesets/fix_customer_snore_infant_wrap.md deleted file mode 100644 index c69784386d..0000000000 --- a/.changesets/fix_customer_snore_infant_wrap.md +++ /dev/null @@ -1,5 +0,0 @@ -### Enable progressive override with federation 2.7 and above ([PR #5754](https://github.com/apollographql/router/pull/5754)) - -The progressive override feature is now available when using Federation v2.7 and above. - -By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/5754 diff --git a/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md b/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md deleted file mode 100644 index f9cd880578..0000000000 --- a/.changesets/fix_garypen_fix_sessions_and_handle_reporting.md +++ /dev/null @@ -1,7 +0,0 @@ -### Fix session counting and the reporting of file handle shortage ([PR #5834](https://github.com/apollographql/router/pull/5834)) - -The router previously gave incorrect warnings about file handle shortages due to session counting incorrectly including connections to health-check connections or other non-GraphQL connections. This is now corrected so that only connections to the main GraphQL port are counted, and file handle shortages are now handled correctly as a global resource. - -Also, the router's port listening logic had its own custom rate-limiting of log notifications. 
This has been removed and replaced by the [standard router log rate limiting configuration](https://www.apollographql.com/docs/router/configuration/telemetry/exporters/logging/stdout/#rate_limit) - -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5834 diff --git a/.changesets/fix_geal_subgraph_error_path.md b/.changesets/fix_geal_subgraph_error_path.md deleted file mode 100644 index f68b129de2..0000000000 --- a/.changesets/fix_geal_subgraph_error_path.md +++ /dev/null @@ -1,5 +0,0 @@ -### Set subgraph error path if not present ([PR #5773](https://github.com/apollographql/router/pull/5773)) - -The router now sets the error path in all cases during subgraph response conversion. Previously the router's subgraph service didn't set the error path for some network-level errors. - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5773 \ No newline at end of file diff --git a/.changesets/fix_missing_cache_gauge.md b/.changesets/fix_missing_cache_gauge.md deleted file mode 100644 index 1b71523210..0000000000 --- a/.changesets/fix_missing_cache_gauge.md +++ /dev/null @@ -1,5 +0,0 @@ -### Fix missing `apollo_router_cache_size` metric ([PR #5770](https://github.com/apollographql/router/pull/5770)) - -Previously, if the in-memory cache wasn't mutated, the `apollo_router_cache_size` metric wouldn't be available. This has been fixed in this release. 
- -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5770 diff --git a/.changesets/fix_renee_consistent_uplink_type.md b/.changesets/fix_renee_consistent_uplink_type.md deleted file mode 100644 index 7a6e044edf..0000000000 --- a/.changesets/fix_renee_consistent_uplink_type.md +++ /dev/null @@ -1,5 +0,0 @@ -### Fix inconsistent `type` attribute in `apollo.router.uplink.fetch.duration` metric ([PR #5816](https://github.com/apollographql/router/pull/5816)) - -The router now always reports a short name in the `type` attribute for the `apollo.router.fetch.duration` metric, instead of sometimes using a fully-qualified Rust path and sometimes using a short name. - -By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5816 diff --git a/.changesets/fix_renee_operation_variables.md b/.changesets/fix_renee_operation_variables.md deleted file mode 100644 index 5b72677432..0000000000 --- a/.changesets/fix_renee_operation_variables.md +++ /dev/null @@ -1,13 +0,0 @@ -### Fix GraphQL query directives validation bug ([PR #5753](https://github.com/apollographql/router/pull/5753)) - -The router now supports GraphQL queries where a variable is used in a directive on the same operation where the variable is declared. - -For example, the following query both declares and uses `$var`: - -```graphql -query GetSomething($var: Int!) 
@someDirective(argument: $var) { - something -} -``` - -By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5753 diff --git a/.changesets/fix_tninesling_cost_result_filtering.md b/.changesets/fix_tninesling_cost_result_filtering.md deleted file mode 100644 index 57bdfbe67f..0000000000 --- a/.changesets/fix_tninesling_cost_result_filtering.md +++ /dev/null @@ -1,41 +0,0 @@ -### Fix cost result filtering for custom metrics ([PR #5838](https://github.com/apollographql/router/pull/5838)) - -The router can now filter for custom metrics that use demand control cost information in their conditions. This allows a telemetry config such as the following: - -```yaml -telemetry: - instrumentation: - instruments: - supergraph: - cost.rejected.operations: - type: histogram - value: - cost: estimated - description: "Estimated cost per rejected operation." - unit: delta - condition: - eq: - - cost: result - - "COST_ESTIMATED_TOO_EXPENSIVE" -``` - -This also fixes an issue where attribute comparisons would fail silently when comparing integers to float values. Users can now write integer values in conditions that compare against selectors that select floats: - -```yaml -telemetry: - instrumentation: - instruments: - supergraph: - cost.rejected.operations: - type: histogram - value: - cost: actual - description: "Estimated cost per rejected operation." 
- unit: delta - condition: - gt: - - cost: delta - - 1 -``` - -By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5838 diff --git a/.changesets/fix_tninesling_demand_control_score_arguments.md b/.changesets/fix_tninesling_demand_control_score_arguments.md deleted file mode 100644 index 200523fb00..0000000000 --- a/.changesets/fix_tninesling_demand_control_score_arguments.md +++ /dev/null @@ -1,5 +0,0 @@ -### Add argument cost to type cost in demand control scoring algorithm ([PR #5740](https://github.com/apollographql/router/pull/5740)) - -The router's operation scoring algorithm for demand control now includes field arguments in the type cost. - -By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5740 diff --git a/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md b/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md deleted file mode 100644 index 722e5ad6ed..0000000000 --- a/.changesets/maint_bnjjj_improve_perf_custom_telemetry.md +++ /dev/null @@ -1,5 +0,0 @@ -### Improve performance by optimizing telemetry meter and instrument creation ([PR #5629](https://github.com/apollographql/router/pull/5629)) - -The router's performance has been improved by removing telemetry creation out of the critical path, from being created in every service to being created when starting the telemetry plugin. - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5629 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fc01da455..987aa1a79c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,426 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). 
+# [1.53.0] - 2024-08-28 + +> [!IMPORTANT] +> If you have enabled [Distributed query plan caching](https://www.apollographql.com/docs/router/configuration/distributed-caching/#distributed-query-plan-caching), this release changes the hashing algorithm used for the cache keys. On account of this, you should anticipate additional cache regeneration cost when updating between these versions while the new hashing algorithm comes into service. + +## 🚀 Features + +### Support demand control directives ([PR #5777](https://github.com/apollographql/router/pull/5777)) + +> ⚠️ This is a [GraphOS Router feature](https://www.apollographql.com/graphos-router). + +The router supports two new demand control directives, `@cost` and `@listSize`, that you can use to provide more accurate estimates of GraphQL operation costs to the router's demand control plugin. + +Use the `@cost` directive to customize the weights of operation cost calculations, particularly for expensive resolvers. + +```graphql +type Product { + id: ID! + name: String + expensiveField: Int @cost(weight: 20) +} +``` + +Use the `@listSize` directive to provide a more accurate estimate for the size of a specific list field, particularly for those that differ greatly from the global list size estimate. + +```graphql +type Magazine { + # This is assumed to always return 5 items + headlines: [Article] @listSize(assumedSize: 5) + + # This is estimated to return as many items as are requested by the parameter named "first" + getPage(first: Int!, after: ID!): [Article] + @listSize(slicingArguments: ["first"]) +} +``` + +To learn more, go to [Demand Control](https://www.apollographql.com/docs/router/executing-operations/demand-control/) docs. 
+ +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5777 + +### General Availability (GA) of Demand Control ([PR #5868](https://github.com/apollographql/router/pull/5868)) + +Demand control in the router is now a generally available (GA) feature. + +**GA compatibility update**: if you used demand control during its preview, to use it in GA you must update your configuration from `preview_demand_control` to `demand_control`. + +To learn more, go to [Demand Control](https://www.apollographql.com/docs/router/executing-operations/demand-control/) docs. + +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5868 + +### Enable native query planner to run in the background ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), [PR #5860](https://github.com/apollographql/router/pull/5860)) + +The router now schedules background jobs to run the native (Rust) query planner to compare its results to the legacy implementation. This helps ascertain its correctness before making a decision to switch entirely to it from the legacy query planner. + +To learn more, go to [Experimental Query Planner Mode](https://www.apollographql.com/docs/router/configuration/configuration/experimental_query_planner_mode) docs. + +The router continues to use the legacy query planner to plan and execute operations, so there is no effect on the hot path. 
+ +To disable running background comparisons with the native query planner, you can configure the router to enable only the `legacy` query planner: + +```yaml +experimental_query_planner_mode: legacy +``` + +By [@SimonSapin](https://github.com/SimonSapin) in ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), [PR #5860](https://github.com/apollographql/router/pull/5860)) + +### Add warnings for invalid configuration of custom telemetry ([PR #5759](https://github.com/apollographql/router/pull/5759)) + +The router now logs warnings when running with telemetry that may have invalid custom configurations. + +For example, you may customize telemetry using invalid conditions or inaccessible statuses: + +```yaml +telemetry: + instrumentation: + events: + subgraph: + my.event: + message: "Auditing Router Event" + level: info + on: request + attributes: + subgraph.response.status: code + # Warning: should use selector for subgraph_name: true instead of comparing strings of subgraph_name and product + condition: + eq: + - subgraph_name + - product +``` + +Although the configuration is syntactically correct, its customization is invalid, and the router now outputs warnings for such invalid configurations.
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759 + +### Add V8 heap usage metrics ([PR #5781](https://github.com/apollographql/router/pull/5781)) + +The router supports new gauge metrics for tracking heap memory usage of the V8 Javascript engine: +- `apollo.router.v8.heap.used`: heap memory used by V8, in bytes +- `apollo.router.v8.heap.total`: total heap allocated by V8, in bytes + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5781 + +### Update Federation to v2.9.0 ([PR #5902](https://github.com/apollographql/router/pull/5902)) + +This updates the router to Federation v2.9.0. + +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5902 + +### Helm: Support `maxSurge` and `maxUnavailable` for rolling updates ([Issue #5664](https://github.com/apollographql/router/issues/5664)) + +The router Helm chart now supports the configuration of `maxSurge` and `maxUnavailable` for the `RollingUpdate` deployment strategy. + +By [Jon Christiansen](https://github.com/theJC) in https://github.com/apollographql/router/pull/5665 + +### Support new telemetry trace ID format ([PR #5735](https://github.com/apollographql/router/pull/5735)) + +The router supports a new UUID format for telemetry trace IDs. + +The following formats are supported in router configuration for trace IDs: + +* `open_telemetry` +* `hexadecimal` (same as `opentelemetry`) +* `decimal` +* `datadog` +* `uuid` (may contain dashes) + +You can configure router logging to display the formatted trace ID with `display_trace_id`: + +```yaml + telemetry: + exporters: + logging: + stdout: + format: + json: + display_trace_id: (true|false|open_telemetry|hexadecimal|decimal|datadog|uuid) +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5735 + +### Add `format` for trace ID propagation. 
([PR #5803](https://github.com/apollographql/router/pull/5803)) + +The router now supports specifying the format of trace IDs that are propagated to subgraphs via headers. + +You can configure the format with the `format` option: + +```yaml +telemetry: + exporters: + tracing: + propagation: + request: + header_name: "my_header" + # Must be in UUID form, with or without dashes + format: uuid +``` + +Note that incoming requests must be some form of UUID, either with or without dashes. + +To learn about supported formats, go to [`request` configuration reference](https://apollographql.com/docs/router/configuration/telemetry/exporters/tracing/overview#request-configuration-reference) docs. + +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5803 + +### New `apollo.router.cache.storage.estimated_size` gauge ([PR #5770](https://github.com/apollographql/router/pull/5770)) + +The router supports the new metric `apollo.router.cache.storage.estimated_size` that helps users understand and monitor the amount of memory that query planner cache entries consume. + +The `apollo.router.cache.storage.estimated_size` metric gives an estimated size in bytes of a cache entry. It has the following attributes: +- `kind`: `query planner`. +- `storage`: `memory`. + +Before using the estimate to decide whether to update the cache, users should validate that the estimate correlates with their pod's memory usage. + +To learn how to troubleshoot with this metric, see the [Pods terminating due to memory pressure](https://www.apollographql.com/docs/router/containerization/kubernetes#pods-terminating-due-to-memory-pressure) guide in docs. 
+ +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5770 + +## 🐛 Fixes + +### Fix GraphQL query directives validation bug ([PR #5753](https://github.com/apollographql/router/pull/5753)) + +The router now supports GraphQL queries where a variable is used in a directive on the same operation where the variable is declared. + +For example, the following query both declares and uses `$var`: + +```graphql +query GetSomething($var: Int!) @someDirective(argument: $var) { + something +} +``` + +By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5753 + +### Evaluate selectors in response stage when possible ([PR #5725](https://github.com/apollographql/router/pull/5725)) + +The router now supports having various supergraph selectors on response events. + +Because `events` are triggered at a specific event (`request`|`response`|`error`), you usually have only one condition for a related event. You can however have selectors that can be applied to several events, like `subgraph_name` to get the subgraph name. + +For example, the following event logs the raw subgraph response only on a subgraph named `products`; this did not work before. + +```yaml +telemetry: + instrumentation: + events: + subgraph: + response: + level: info + condition: + eq: + - subgraph_name: true + - "products" +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5725 + +### Fix trace propagation via header ([PR #5802](https://github.com/apollographql/router/pull/5802)) + +The router now correctly propagates trace IDs when using the `propagation.request.header_name` configuration option. + +```yaml +telemetry: + exporters: + tracing: + propagation: + request: + header_name: "id_from_header" +``` + +Previously, trace IDs weren't transferred to the root span of the request, causing spans to be incorrectly attributed to new traces.
+ +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5802 + +### Add argument cost to type cost in demand control scoring algorithm ([PR #5740](https://github.com/apollographql/router/pull/5740)) + +The router's operation scoring algorithm for demand control now includes field arguments in the type cost. + +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5740 + +### Support `gt`/`lt` conditions for parsing string selectors to numbers ([PR #5758](https://github.com/apollographql/router/pull/5758)) + +The router now supports greater than (`gt`) and less than (`lt`) conditions for header selectors. + +The following example applies an attribute on a span if the `content-length` header is greater than 100: + +```yaml +telemetry: + instrumentation: + spans: + mode: spec_compliant + router: + attributes: + trace_id: true + payload_is_to_big: # Set this attribute to true if the value of content-length header is > than 100 + static: true + condition: + gt: + - request_header: "content-length" + - 100 +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5758 + +### Set subgraph error path if not present ([PR #5773](https://github.com/apollographql/router/pull/5773)) + +The router now sets the error path in all cases during subgraph response conversion. Previously the router's subgraph service didn't set the error path for some network-level errors. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5773 + +### Fix cost result filtering for custom metrics ([PR #5838](https://github.com/apollographql/router/pull/5838)) + +The router can now filter for custom metrics that use demand control cost information in their conditions. 
This allows a telemetry config such as the following: + +```yaml +telemetry: + instrumentation: + instruments: + supergraph: + cost.rejected.operations: + type: histogram + value: + cost: estimated + description: "Estimated cost per rejected operation." + unit: delta + condition: + eq: + - cost: result + - "COST_ESTIMATED_TOO_EXPENSIVE" +``` + +This also fixes an issue where attribute comparisons would fail silently when comparing integers to float values. Users can now write integer values in conditions that compare against selectors that select floats: + +```yaml +telemetry: + instrumentation: + instruments: + supergraph: + cost.rejected.operations: + type: histogram + value: + cost: actual + description: "Estimated cost per rejected operation." + unit: delta + condition: + gt: + - cost: delta + - 1 +``` + +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5838 + +### Fix missing `apollo_router_cache_size` metric ([PR #5770](https://github.com/apollographql/router/pull/5770)) + +Previously, if the in-memory cache wasn't mutated, the `apollo_router_cache_size` metric wouldn't be available. This has been fixed in this release. + +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5770 + +### Interrupted subgraph connections trigger error responses and subgraph service hook points ([PR #5859](https://github.com/apollographql/router/pull/5859)) + +The router now returns a proper subgraph response, with an error if necessary, when a subgraph connection is closed or returns an error. + +Previously, this issue prevented the subgraph response service from being triggered in coprocessors or Rhai scripts. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5859 + +### Fix `exists` condition for custom telemetry events ([Issue #5702](https://github.com/apollographql/router/issues/5702)) + +The router now properly handles the `exists` condition for events. The following configuration now works as intended: + +```yaml +telemetry: + instrumentation: + events: + supergraph: + my.event: + message: "Auditing Router Event" + level: info + on: request + attributes: + graphql.operation.name: true + condition: + exists: + operation_name: string +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759 + +### Fix Datadog underreporting APM metrics ([PR #5780](https://github.com/apollographql/router/pull/5780)) + +The previous [PR #5703](https://github.com/apollographql/router/pull/5703) has been reverted in this release because it caused Datadog to underreport APM span metrics. + +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5780 + +### Fix inconsistent `type` attribute in `apollo.router.uplink.fetch.duration` metric ([PR #5816](https://github.com/apollographql/router/pull/5816)) + +The router now always reports a short name in the `type` attribute for the `apollo.router.fetch.duration` metric, instead of sometimes using a fully-qualified Rust path and sometimes using a short name. + +By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5816 + +### Enable progressive override with Federation 2.7 and above ([PR #5754](https://github.com/apollographql/router/pull/5754)) + +The progressive override feature is now available when using Federation v2.7 and above. 
+ +By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/5754 + +### Support supergraph query selector for events ([PR #5764](https://github.com/apollographql/router/pull/5764)) + +The router now supports the `query: root_fields` selector for `event_response`. Previously the selector worked for `response` stage events but didn't work for `event_response`. + +The following configuration for a `query: root_fields` on an `event_response` now works: + +```yaml +telemetry: + instrumentation: + events: + supergraph: + OPERATION_LIMIT_INFO: + message: operation limit info + on: event_response + level: info + attributes: + graphql.operation.name: true + query.root_fields: + query: root_fields +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5764 + +### Fix session counting and the reporting of file handle shortage ([PR #5834](https://github.com/apollographql/router/pull/5834)) + +The router previously gave incorrect warnings about file handle shortages due to session counting incorrectly including connections to health-check connections or other non-GraphQL connections. This is now corrected so that only connections to the main GraphQL port are counted, and file handle shortages are now handled correctly as a global resource. + +Also, the router's port listening logic had its own custom rate-limiting of log notifications. This has been removed and replaced by the [standard router log rate limiting configuration](https://www.apollographql.com/docs/router/configuration/telemetry/exporters/logging/stdout/#rate_limit) + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5834 + +## 📃 Configuration + +### Increase default Redis timeout ([PR #5795](https://github.com/apollographql/router/pull/5795)) + +The default Redis command timeout was increased from 2ms to 500ms to accommodate common production use cases. 
+ +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5795 + +## 🛠 Maintenance + +### Improve performance by optimizing telemetry meter and instrument creation ([PR #5629](https://github.com/apollographql/router/pull/5629)) + +The router's performance has been improved by removing telemetry creation out of the critical path, from being created in every service to being created when starting the telemetry plugin. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5629 + +## 📚 Documentation + +### Add sections on using `@cost` and `@listSize` to demand control docs ([PR #5839](https://github.com/apollographql/router/pull/5839)) + +Updates the demand control documentation to include details on `@cost` and `@listSize` for more accurate cost estimation. + +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5839 + # [1.52.1] - 2024-08-27 > [!IMPORTANT] diff --git a/Cargo.lock b/Cargo.lock index ac59d9dd47..8a99dd3deb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -178,7 +178,7 @@ dependencies = [ [[package]] name = "apollo-federation" -version = "1.53.0-rc.1" +version = "1.53.0" dependencies = [ "apollo-compiler", "derive_more", @@ -229,7 +229,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.53.0-rc.1" +version = "1.53.0" dependencies = [ "access-json", "ahash", @@ -398,7 +398,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.53.0-rc.1" +version = "1.53.0" dependencies = [ "apollo-parser", "apollo-router", @@ -414,7 +414,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.53.0-rc.1" +version = "1.53.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml index 0f62efa5b6..220c25b371 100644 --- a/apollo-federation/Cargo.toml +++ b/apollo-federation/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-federation" 
-version = "1.53.0-rc.1" +version = "1.53.0" authors = ["The Apollo GraphQL Contributors"] edition = "2021" description = "Apollo Federation" diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index 4c84421402..c051b44df7 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.53.0-rc.1" +version = "1.53.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index b4217e82b5..5cf55bfeda 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.53.0-rc.1" +version = "1.53.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.template.toml b/apollo-router-scaffold/templates/base/Cargo.template.toml index b66b95c69c..9953fb981c 100644 --- a/apollo-router-scaffold/templates/base/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/Cargo.template.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.53.0-rc.1" +apollo-router = "1.53.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml index e3fa00ed97..482be08889 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} 
apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.53.0-rc.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.53.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 6265d716fa..19950eb520 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.53.0-rc.1" +version = "1.53.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -68,7 +68,7 @@ askama = "0.12.1" access-json = "0.1.0" anyhow = "1.0.86" apollo-compiler.workspace = true -apollo-federation = { path = "../apollo-federation", version = "=1.53.0-rc.1" } +apollo-federation = { path = "../apollo-federation", version = "=1.53.0" } arc-swap = "1.6.0" async-channel = "1.9.0" async-compression = { version = "0.4.6", features = [ diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 3043bbd6ff..83c20bb40e 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.53.0-rc.1 + image: ghcr.io/apollographql/router:v1.53.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index f8364b6fbc..94900947d1 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: 
ghcr.io/apollographql/router:v1.53.0-rc.1 + image: ghcr.io/apollographql/router:v1.53.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 42ac11aa6d..f719e3de99 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.53.0-rc.1 + image: ghcr.io/apollographql/router:v1.53.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 58f0f433e7..40ba75ba53 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.53.0-rc.1 +version: 1.53.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v1.53.0-rc.1" +appVersion: "v1.53.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 802b6ddd61..e962582547 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.53.0-rc.1](https://img.shields.io/badge/Version-1.53.0--rc.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.53.0-rc.1](https://img.shields.io/badge/AppVersion-v1.53.0--rc.1-informational?style=flat-square) +![Version: 1.53.0](https://img.shields.io/badge/Version-1.53.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.53.0](https://img.shields.io/badge/AppVersion-v1.53.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.1 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0-rc.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/licenses.html b/licenses.html index 1725f7e24d..37df6f6803 100644 --- a/licenses.html +++ b/licenses.html @@ -44,8 +44,8 @@

    Third Party Licenses

    Overview of licenses:

    + +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        https://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
     
  • @@ -2989,6 +3175,7 @@

    Used by:

  • clap_builder
  • clap_derive
  • clap_lex
  • +
  • opentelemetry-proto
  •                                  Apache License
                                Version 2.0, January 2004
    @@ -5295,6 +5482,7 @@ 

    Used by:

  • utf-8
  • utf8parse
  • wasm-streams
  • +
  • zerocopy
  •                               Apache License
                             Version 2.0, January 2004
    @@ -8230,6 +8418,7 @@ 

    Used by:

  • cc
  • cfg-if
  • cfg-if
  • +
  • ci_info
  • cmake
  • concurrent-queue
  • const-random
  • @@ -8246,6 +8435,7 @@

    Used by:

  • derive_arbitrary
  • displaydoc
  • either
  • +
  • envmnt
  • equivalent
  • error-chain
  • event-listener
  • @@ -8257,6 +8447,7 @@

    Used by:

  • fnv
  • form_urlencoded
  • fraction
  • +
  • fsio
  • futures-lite
  • futures-timer
  • gimli
  • @@ -11067,6 +11258,53 @@

    Used by:

    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +
    + +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    # Contributing
    +
    +## License
    +
    +Licensed under either of
    +
    + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
    + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
    +
    +at your option.
    +
    +### Contribution
    +
    +Unless you explicitly state otherwise, any contribution intentionally submitted
    +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
    +additional terms or conditions.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    ../../LICENSE-APACHE
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    // Licensed under the Apache License, Version 2.0
    +// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
    +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
    +// All files in the project carrying such notice may not be copied, modified, or distributed
    +// except according to those terms.
     
  • @@ -11703,14 +11941,10 @@

    Used by:

    Apache License 2.0

    Used by:

  • + +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    Copyright 2021 Oliver Giersch
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    Copyright [2022] [Bryn Cooke]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    Copyright [2023] [Bryn Cooke]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    Licensed under the Apache License, Version 2.0
    +<LICENSE-APACHE or
    +http://www.apache.org/licenses/LICENSE-2.0> or the MIT
    +license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
    +at your option. All files in the project carrying such
    +notice may not be copied, modified, or distributed except
    +according to those terms.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    MIT OR Apache-2.0
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    MIT OR Apache-2.0
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    MIT or Apache-2.0
     
  • @@ -12672,36 +13011,6 @@

    Used by:

    // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -
  • - -
  • -

    ISC License

    -

    Used by:

    - -
    // Copyright 2021 Brian Smith.
    -//
    -// Permission to use, copy, modify, and/or distribute this software for any
    -// purpose with or without fee is hereby granted, provided that the above
    -// copyright notice and this permission notice appear in all copies.
    -//
    -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
    -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR
    -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    -
    -#[test]
    -fn cert_without_extensions_test() {
    -    // Check the certificate is valid with
    -    // `openssl x509 -in cert_without_extensions.der -inform DER -text -noout`
    -    const CERT_WITHOUT_EXTENSIONS_DER: &[u8] = include_bytes!("cert_without_extensions.der");
    -
    -    assert!(webpki::EndEntityCert::try_from(CERT_WITHOUT_EXTENSIONS_DER).is_ok());
    -}
     
  • @@ -12771,6 +13080,7 @@

    ISC License

    Used by:

    ISC License:
     
    @@ -13520,6 +13830,66 @@ 

    Used by:

    shall be included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +
    +
  • +
  • +

    MIT License

    +

    Used by:

    + +
    Copyright (c) 2019 Carl Lerche
    +
    +Permission is hereby granted, free of charge, to any
    +person obtaining a copy of this software and associated
    +documentation files (the "Software"), to deal in the
    +Software without restriction, including without
    +limitation the rights to use, copy, modify, merge,
    +publish, distribute, sublicense, and/or sell copies of
    +the Software, and to permit persons to whom the Software
    +is furnished to do so, subject to the following
    +conditions:
    +
    +The above copyright notice and this permission notice
    +shall be included in all copies or substantial portions
    +of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
    +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
    +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
    +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
    +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    +DEALINGS IN THE SOFTWARE.
    +
    +Copyright (c) 2018 David Tolnay
    +
    +Permission is hereby granted, free of charge, to any
    +person obtaining a copy of this software and associated
    +documentation files (the "Software"), to deal in the
    +Software without restriction, including without
    +limitation the rights to use, copy, modify, merge,
    +publish, distribute, sublicense, and/or sell copies of
    +the Software, and to permit persons to whom the Software
    +is furnished to do so, subject to the following
    +conditions:
    +
    +The above copyright notice and this permission notice
    +shall be included in all copies or substantial portions
    +of the Software.
    +
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
     ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
     TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    @@ -14526,8 +14896,6 @@ 

    Used by:

    MIT License

    Used by:

    +
  • +
  • +

    MIT License

    +

    Used by:

    + +
    The MIT License (MIT)
    +
    +Copyright (c) 2014 Mathijs van de Nes
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    +
  • MIT License

    @@ -14872,6 +15269,8 @@

    Used by:

    The MIT License (MIT)
     
    @@ -15423,6 +15822,25 @@ 

    Used by:

    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +
    +
  • +
  • +

    MIT License

    +

    Used by:

    + +
    This project is dual-licensed under the Unlicense and MIT licenses.
    +
    +You may use this code under the terms of either license.
     
  • @@ -15820,7 +16238,6 @@

    Used by:

    Mozilla Public License 2.0

    Used by:

    Mozilla Public License Version 2.0
    @@ -16203,8 +16620,8 @@ 

    Used by:

    Mozilla Public License 2.0

    Used by:

    Mozilla Public License Version 2.0
     ==================================
    @@ -16579,6 +16996,35 @@ 

    Used by:

    This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. +
    +
  • +
  • +

    Mozilla Public License 2.0

    +

    Used by:

    + +
    This packge contains a modified version of ca-bundle.crt:
    +
    +ca-bundle.crt -- Bundle of CA Root Certificates
    +
    +Certificate data from Mozilla as of: Thu Nov  3 19:04:19 2011#
    +This is a bundle of X.509 certificates of public Certificate Authorities
    +(CA). These were automatically extracted from Mozilla's root certificates
    +file (certdata.txt).  This file can be found in the mozilla source tree:
    +http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1#
    +It contains the certificates in PEM format and therefore
    +can be directly used with curl / libcurl / php_curl, or with
    +an Apache+mod_ssl webserver for SSL client authentication.
    +Just configure this file as the SSLCACertificateFile.#
    +
    +***** BEGIN LICENSE BLOCK *****
    +This Source Code Form is subject to the terms of the Mozilla Public License,
    +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
    +one at http://mozilla.org/MPL/2.0/.
    +
    +***** END LICENSE BLOCK *****
    +@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
     
  • @@ -16649,50 +17095,26 @@

    Used by:

    UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
     
    -See Terms of Use <https://www.unicode.org/copyright.html>
    -for definitions of Unicode Inc.’s Data Files and Software.
    +Unicode Data Files include all data files under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    +
    +Unicode Data Files do not include PDF online code charts under the directory http://www.unicode.org/Public/.
     
    -NOTICE TO USER: Carefully read the following legal agreement.
    -BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
    -DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
    -YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
    -TERMS AND CONDITIONS OF THIS AGREEMENT.
    -IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
    -THE DATA FILES OR SOFTWARE.
    +Software includes any source code published in the Unicode Standard or under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    +
    +NOTICE TO USER: Carefully read the following legal agreement. BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.
     
     COPYRIGHT AND PERMISSION NOTICE
     
    -Copyright © 1991-2022 Unicode, Inc. All rights reserved.
    -Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
    +Copyright © 1991-2016 Unicode, Inc. All rights reserved. Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
     
    -Permission is hereby granted, free of charge, to any person obtaining
    -a copy of the Unicode data files and any associated documentation
    -(the "Data Files") or Unicode software and any associated documentation
    -(the "Software") to deal in the Data Files or Software
    -without restriction, including without limitation the rights to use,
    -copy, modify, merge, publish, distribute, and/or sell copies of
    -the Data Files or Software, and to permit persons to whom the Data Files
    -or Software are furnished to do so, provided that either
    -(a) this copyright and permission notice appear with all copies
    -of the Data Files or Software, or
    -(b) this copyright and permission notice appear in associated
    -Documentation.
    -
    -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
    -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
    -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    -NONINFRINGEMENT OF THIRD PARTY RIGHTS.
    -IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
    -NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
    -DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
    -DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
    -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
    -PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    -
    -Except as contained in this notice, the name of a copyright holder
    -shall not be used in advertising or otherwise to promote the sale,
    -use or other dealings in these Data Files or Software without prior
    -written authorization of the copyright holder.
    +Permission is hereby granted, free of charge, to any person obtaining a copy of the Unicode data files and any associated documentation (the "Data Files") or Unicode software and any associated documentation (the "Software") to deal in the Data Files or Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Data Files or Software, and to permit persons to whom the Data Files or Software are furnished to do so, provided that either
    +
    +     (a) this copyright and permission notice appear with all copies of the Data Files or Software, or
    +     (b) this copyright and permission notice appear in associated Documentation.
    +
    +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    +
    +Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in these Data Files or Software without prior written authorization of the copyright holder.
     
  • diff --git a/scripts/install.sh b/scripts/install.sh index ebb6b69b6b..f250d420a1 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.53.0-rc.1" +PACKAGE_VERSION="v1.53.0" download_binary() { downloader --check