diff --git a/.dockerignore b/.dockerignore index 0743bcf..3df78dc 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,3 +6,5 @@ # Code /docs + +/bin \ No newline at end of file diff --git a/.gitignore b/.gitignore index 8a6d021..7eb3623 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,5 @@ _build /bin cover.out charts/*/Chart.lock -kubelb-*.tgz \ No newline at end of file +kubelb-*.tgz +__debug* \ No newline at end of file diff --git a/Makefile b/Makefile index fbe8def..8fc62af 100644 --- a/Makefile +++ b/Makefile @@ -99,7 +99,7 @@ clean: ## Clean binaries .PHONY: test test: envtest ## Run tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./internal/... -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -v ./internal/... -coverprofile cover.out ##@ Build diff --git a/api/kubelb.k8c.io/v1alpha1/addresses_types.go b/api/kubelb.k8c.io/v1alpha1/addresses_types.go new file mode 100644 index 0000000..c676799 --- /dev/null +++ b/api/kubelb.k8c.io/v1alpha1/addresses_types.go @@ -0,0 +1,57 @@ +/* +Copyright 2024 The KubeLB Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AddressesSpec defines the desired state of Addresses +type AddressesSpec struct { + // Addresses contains a list of addresses. + //+kubebuilder:validation:MinItems:=1 + Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` +} + +// AddressesStatus defines the observed state of Addresses +type AddressesStatus struct { +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Addresses is the Schema for the addresses API +type Addresses struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AddressesSpec `json:"spec,omitempty"` + Status AddressesStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AddressesList contains a list of Addresses +type AddressesList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Addresses `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Addresses{}, &AddressesList{}) +} diff --git a/api/kubelb.k8c.io/v1alpha1/common_types.go b/api/kubelb.k8c.io/v1alpha1/common_types.go index 751238e..452a474 100644 --- a/api/kubelb.k8c.io/v1alpha1/common_types.go +++ b/api/kubelb.k8c.io/v1alpha1/common_types.go @@ -18,6 +18,11 @@ package v1alpha1 import corev1 "k8s.io/api/core/v1" +const ( + // DefaultAddressName is the default name for the Addresses object. + DefaultAddressName = "default" +) + // LoadBalancerEndpoints is a group of addresses with a common set of ports. The // expanded set of endpoints is the Cartesian product of Addresses x Ports. 
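The new Addresses type above is a plain namespaced CRD carrying a validated list of endpoint addresses. As a minimal sketch of how a per-tenant object might be created with these types, assuming the EndpointAddress Go fields are IP/Hostname (mirroring the CRD's ip/hostname properties) and using placeholder namespace and IP values:

```go
package main

import (
	"context"

	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createDefaultAddresses creates a per-tenant Addresses object under the
// DefaultAddressName constant added in common_types.go. Namespace and IP
// are placeholders, not values taken from this patch.
func createDefaultAddresses(ctx context.Context, c client.Client) error {
	addresses := &kubelbv1alpha1.Addresses{
		ObjectMeta: metav1.ObjectMeta{
			Name:      kubelbv1alpha1.DefaultAddressName, // "default"
			Namespace: "tenant-example",
		},
		Spec: kubelbv1alpha1.AddressesSpec{
			// The CRD enforces MinItems=1 on this list.
			Addresses: []kubelbv1alpha1.EndpointAddress{
				{IP: "203.0.113.10"},
			},
		},
	}
	return c.Create(ctx, addresses)
}
```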
// For example, given: @@ -40,7 +45,14 @@ type LoadBalancerEndpoints struct { //+kubebuilder:validation:MinItems:=1 Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` + // AddressesReference is a reference to the Addresses object that contains the IP addresses. + // If this field is set, the Addresses field will be ignored. + // +optional + AddressesReference *corev1.ObjectReference `json:"addressesReference,omitempty" protobuf:"bytes,2,opt,name=addressesReference"` + // Port numbers available on the related IP addresses. + // This field is ignored for routes that are using kubernetes resources as the source. + // +optional //+kubebuilder:validation:MinItems=1 Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` } diff --git a/api/kubelb.k8c.io/v1alpha1/config_types.go b/api/kubelb.k8c.io/v1alpha1/config_types.go index ff7a223..2260451 100644 --- a/api/kubelb.k8c.io/v1alpha1/config_types.go +++ b/api/kubelb.k8c.io/v1alpha1/config_types.go @@ -42,6 +42,11 @@ type ConfigSpec struct { // PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored. // +optional PropagateAllAnnotations bool `json:"propagateAllAnnotations,omitempty"` + + // IngressClassName is the name of the IngressClass that will be used for the routes created by KubeLB. If not specified, KubeLB will replace the IngressClassName + // with an empty value in the Ingress resource which would result in the default IngressClass being used. + // +optional + IngressClassName *string `json:"ingressClassName,omitempty"` } // EnvoyProxy defines the desired state of the EnvoyProxy diff --git a/api/kubelb.k8c.io/v1alpha1/loadbalancer_types.go b/api/kubelb.k8c.io/v1alpha1/loadbalancer_types.go index bbdde0f..1536af5 100644 --- a/api/kubelb.k8c.io/v1alpha1/loadbalancer_types.go +++ b/api/kubelb.k8c.io/v1alpha1/loadbalancer_types.go @@ -26,8 +26,6 @@ import ( // To configure multiple different annotations, you can provide unique suffix e.g. "kubelb.k8c.io/propagate-annotation-1" var PropagateAnnotation = "kubelb.k8c.io/propagate-annotation" -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - // LoadBalancerStatus defines the observed state of LoadBalancer type LoadBalancerStatus struct { // LoadBalancer contains the current status of the load-balancer, diff --git a/api/kubelb.k8c.io/v1alpha1/route_types.go b/api/kubelb.k8c.io/v1alpha1/route_types.go index d0de479..772f4e1 100644 --- a/api/kubelb.k8c.io/v1alpha1/route_types.go +++ b/api/kubelb.k8c.io/v1alpha1/route_types.go @@ -20,6 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + runtime "k8s.io/apimachinery/pkg/runtime" gwapiv1a2 "sigs.k8s.io/gateway-api/apis/v1alpha2" ) @@ -37,6 +38,7 @@ type RouteSpec struct { type RouteSource struct { // Kubernetes contains the information about the Kubernetes source. + // This field is automatically populated by the KubeLB CCM and in most cases, users should not set this field manually. Kubernetes *KubernetesSource `json:"kubernetes,omitempty"` } @@ -104,6 +106,55 @@ type Route struct { // RouteStatus defines the observed state of the Route. type RouteStatus struct { + // Resources contains the list of resources that are created/processed as a result of the Route. 
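With the new AddressesReference field, an endpoints entry can defer to a shared Addresses object instead of embedding IPs. A hedged sketch of what that looks like in Go; per the field docs above, the reference takes precedence over the inline Addresses list:

```go
import (
	corev1 "k8s.io/api/core/v1"

	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
)

// endpointsWithReference builds a LoadBalancerEndpoints entry that points at
// the shared Addresses object. The inline Addresses list is ignored once
// AddressesReference is set, and Ports may be omitted for routes sourced
// from Kubernetes resources.
func endpointsWithReference() kubelbv1alpha1.LoadBalancerEndpoints {
	return kubelbv1alpha1.LoadBalancerEndpoints{
		AddressesReference: &corev1.ObjectReference{
			Name: kubelbv1alpha1.DefaultAddressName,
		},
	}
}
```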
+ Resources RouteResourcesStatus `json:"resources,omitempty"` +} + +type RouteResourcesStatus struct { + Source string `json:"source,omitempty"` + + Services map[string]RouteServiceStatus `json:"services,omitempty"` + + ReferenceGrants map[string]ResourceState `json:"referenceGrants,omitempty"` + + Route ResourceState `json:"route,omitempty"` +} + +type RouteServiceStatus struct { + ResourceState `json:",inline"` + Ports []corev1.ServicePort `json:"ports,omitempty"` +} + +type ResourceState struct { + // APIVersion is the API version of the resource. + APIVersion string `json:"apiVersion,omitempty"` + + // Kind is the kind of the resource. + Kind string `json:"kind,omitempty"` + + // Name is the name of the resource. + Name string `json:"name,omitempty"` + + // Namespace is the namespace of the resource. + Namespace string `json:"namespace,omitempty"` + + // GeneratedName is the generated name of the resource. + GeneratedName string `json:"generatedName,omitempty"` + + // Status is the actual status of the resource. + Status runtime.RawExtension `json:"status,omitempty"` + + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +type ConditionType string + +const ( + ConditionResourceAppliedSuccessfully ConditionType = "ResourceAppliedSuccessfully" +) + +func (t ConditionType) String() string { + return string(t) } //+kubebuilder:object:root=true diff --git a/api/kubelb.k8c.io/v1alpha1/zz_generated.deepcopy.go b/api/kubelb.k8c.io/v1alpha1/zz_generated.deepcopy.go index 9571bc7..85e98c5 100644 --- a/api/kubelb.k8c.io/v1alpha1/zz_generated.deepcopy.go +++ b/api/kubelb.k8c.io/v1alpha1/zz_generated.deepcopy.go @@ -22,9 +22,104 @@ package v1alpha1 import ( "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addresses) DeepCopyInto(out *Addresses) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addresses. +func (in *Addresses) DeepCopy() *Addresses { + if in == nil { + return nil + } + out := new(Addresses) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Addresses) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressesList) DeepCopyInto(out *AddressesList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Addresses, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressesList. +func (in *AddressesList) DeepCopy() *AddressesList { + if in == nil { + return nil + } + out := new(AddressesList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
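The status types above are deliberately generic: ResourceState.Status is a runtime.RawExtension, so any downstream object's status can be captured without a schema. A sketch of how a controller might fill one entry; the helper name and reason string are illustrative, not from this patch:

```go
import (
	"encoding/json"

	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// recordResourceState snapshots a processed object into a ResourceState,
// storing its raw .status and setting the new condition type.
func recordResourceState(apiVersion, kind, name, namespace string, rawStatus any) (kubelbv1alpha1.ResourceState, error) {
	raw, err := json.Marshal(rawStatus)
	if err != nil {
		return kubelbv1alpha1.ResourceState{}, err
	}
	state := kubelbv1alpha1.ResourceState{
		APIVersion: apiVersion,
		Kind:       kind,
		Name:       name,
		Namespace:  namespace,
		Status:     runtime.RawExtension{Raw: raw},
	}
	// SetStatusCondition fills LastTransitionTime automatically.
	meta.SetStatusCondition(&state.Conditions, metav1.Condition{
		Type:   kubelbv1alpha1.ConditionResourceAppliedSuccessfully.String(),
		Status: metav1.ConditionTrue,
		Reason: "Applied", // illustrative reason
	})
	return state, nil
}
```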
+func (in *AddressesList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressesSpec) DeepCopyInto(out *AddressesSpec) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressesSpec. +func (in *AddressesSpec) DeepCopy() *AddressesSpec { + if in == nil { + return nil + } + out := new(AddressesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressesStatus) DeepCopyInto(out *AddressesStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressesStatus. +func (in *AddressesStatus) DeepCopy() *AddressesStatus { + if in == nil { + return nil + } + out := new(AddressesStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Config) DeepCopyInto(out *Config) { *out = *in @@ -94,6 +189,11 @@ func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { (*out)[key] = val } } + if in.IngressClassName != nil { + in, out := &in.IngressClassName, &out.IngressClassName + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. @@ -240,6 +340,11 @@ func (in *LoadBalancerEndpoints) DeepCopyInto(out *LoadBalancerEndpoints) { *out = make([]EndpointAddress, len(*in)) copy(*out, *in) } + if in.AddressesReference != nil { + in, out := &in.AddressesReference, &out.AddressesReference + *out = new(v1.ObjectReference) + **out = **in + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]EndpointPort, len(*in)) @@ -348,13 +453,36 @@ func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceState) DeepCopyInto(out *ResourceState) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceState. +func (in *ResourceState) DeepCopy() *ResourceState { + if in == nil { + return nil + } + out := new(ResourceState) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Route) DeepCopyInto(out *Route) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. @@ -407,6 +535,59 @@ func (in *RouteList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
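These generated deepcopy helpers are what make the new types safe to use with controller-runtime caches, where returned objects are shared and must not be mutated in place. A small illustration, with a placeholder IP and the EndpointAddress field name assumed from the CRD's ip property:

```go
import kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"

// updateSpecSafely mutates a copy of a cache-owned Addresses object rather
// than the shared instance, relying on the generated DeepCopy.
func updateSpecSafely(cached *kubelbv1alpha1.Addresses) *kubelbv1alpha1.Addresses {
	updated := cached.DeepCopy()
	updated.Spec.Addresses = append(updated.Spec.Addresses,
		kubelbv1alpha1.EndpointAddress{IP: "198.51.100.7"},
	)
	return updated
}
```

This file is regenerated by controller-gen (v0.14.0, per the CRD annotations), so it should never be edited by hand.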
+func (in *RouteResourcesStatus) DeepCopyInto(out *RouteResourcesStatus) { + *out = *in + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make(map[string]RouteServiceStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.ReferenceGrants != nil { + in, out := &in.ReferenceGrants, &out.ReferenceGrants + *out = make(map[string]ResourceState, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.Route.DeepCopyInto(&out.Route) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteResourcesStatus. +func (in *RouteResourcesStatus) DeepCopy() *RouteResourcesStatus { + if in == nil { + return nil + } + out := new(RouteResourcesStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteServiceStatus) DeepCopyInto(out *RouteServiceStatus) { + *out = *in + in.ResourceState.DeepCopyInto(&out.ResourceState) + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteServiceStatus. +func (in *RouteServiceStatus) DeepCopy() *RouteServiceStatus { + if in == nil { + return nil + } + out := new(RouteServiceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteSource) DeepCopyInto(out *RouteSource) { *out = *in @@ -453,6 +634,7 @@ func (in *RouteSpec) DeepCopy() *RouteSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { *out = *in + in.Resources.DeepCopyInto(&out.Resources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. diff --git a/charts/kubelb-manager/crds/kubelb.k8c.io_addresses.yaml b/charts/kubelb-manager/crds/kubelb.k8c.io_addresses.yaml new file mode 100644 index 0000000..7be50ad --- /dev/null +++ b/charts/kubelb-manager/crds/kubelb.k8c.io_addresses.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: addresses.kubelb.k8c.io +spec: + group: kubelb.k8c.io + names: + kind: Addresses + listKind: AddressesList + plural: addresses + singular: addresses + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Addresses is the Schema for the addresses API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AddressesSpec defines the desired state of Addresses + properties: + addresses: + description: Addresses contains a list of addresses. + items: + description: EndpointAddress is a tuple that describes single IP + address. + properties: + hostname: + description: The Hostname of this endpoint + type: string + ip: + description: |- + The IP of this endpoint. + May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + or link-local multicast ((224.0.0.0/24). + minLength: 7 + type: string + required: + - ip + type: object + minItems: 1 + type: array + type: object + status: + description: AddressesStatus defines the observed state of Addresses + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/kubelb-manager/crds/kubelb.k8c.io_configs.yaml b/charts/kubelb-manager/crds/kubelb.k8c.io_configs.yaml index 6d8128d..ecfc8a2 100644 --- a/charts/kubelb-manager/crds/kubelb.k8c.io_configs.yaml +++ b/charts/kubelb-manager/crds/kubelb.k8c.io_configs.yaml @@ -1102,6 +1102,11 @@ spec: If set to true, Replicas will be ignored. type: boolean type: object + ingressClassName: + description: |- + IngressClassName is the name of the IngressClass that will be used for the routes created by KubeLB. If not specified, KubeLB will replace the IngressClassName + with an empty value in the Ingress resource which would result in the default IngressClass being used. + type: string propagateAllAnnotations: description: PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, diff --git a/charts/kubelb-manager/crds/kubelb.k8c.io_loadbalancers.yaml b/charts/kubelb-manager/crds/kubelb.k8c.io_loadbalancers.yaml index f8ed44a..34f6da8 100644 --- a/charts/kubelb-manager/crds/kubelb.k8c.io_loadbalancers.yaml +++ b/charts/kubelb-manager/crds/kubelb.k8c.io_loadbalancers.yaml @@ -77,11 +77,59 @@ spec: type: object minItems: 1 type: array + addressesReference: + description: |- + AddressesReference is a reference to the Addresses object that contains the IP addresses. + If this field is set, the Addresses field will be ignored. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
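Note that the schema above only enforces minLength: 7 on ip (the shortest dotted-quad, e.g. "1.2.3.4"); the loopback and link-local restrictions in the description are documentation, not validation. A stdlib check mirroring those documented ranges might look like:

```go
import (
	"fmt"
	"net"
)

// validateEndpointIP enforces the documented EndpointAddress restrictions:
// no loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local
// multicast (224.0.0.0/24) addresses.
func validateEndpointIP(s string) error {
	ip := net.ParseIP(s)
	if ip == nil {
		return fmt.Errorf("%q is not a valid IP address", s)
	}
	if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
		return fmt.Errorf("%q falls in a disallowed range", s)
	}
	return nil
}
```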
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic name: description: Name is the name of the endpoints. type: string ports: - description: Port numbers available on the related IP addresses. + description: |- + Port numbers available on the related IP addresses. + This field is ignored for routes that are using kubernetes resources as the source. items: description: EndpointPort is a tuple that describes a single port. diff --git a/charts/kubelb-manager/crds/kubelb.k8c.io_routes.yaml b/charts/kubelb-manager/crds/kubelb.k8c.io_routes.yaml index 0ca1cb8..7218433 100644 --- a/charts/kubelb-manager/crds/kubelb.k8c.io_routes.yaml +++ b/charts/kubelb-manager/crds/kubelb.k8c.io_routes.yaml @@ -75,11 +75,59 @@ spec: type: object minItems: 1 type: array + addressesReference: + description: |- + AddressesReference is a reference to the Addresses object that contains the IP addresses. + If this field is set, the Addresses field will be ignored. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic name: description: Name is the name of the endpoints. type: string ports: - description: Port numbers available on the related IP addresses. + description: |- + Port numbers available on the related IP addresses. + This field is ignored for routes that are using kubernetes resources as the source. 
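A consumer of this field would resolve the reference before falling back to the inline list. A sketch, under the assumption that the referenced object lives in the endpoint's own namespace whenever the reference leaves namespace unset:

```go
import (
	"context"

	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// resolveAddresses returns the effective address list for one endpoints
// entry: the referenced Addresses object wins over the inline list.
func resolveAddresses(ctx context.Context, c client.Client, namespace string, ep kubelbv1alpha1.LoadBalancerEndpoints) ([]kubelbv1alpha1.EndpointAddress, error) {
	if ep.AddressesReference == nil {
		return ep.Addresses, nil
	}
	if ns := ep.AddressesReference.Namespace; ns != "" {
		namespace = ns
	}
	var addr kubelbv1alpha1.Addresses
	key := client.ObjectKey{Namespace: namespace, Name: ep.AddressesReference.Name}
	if err := c.Get(ctx, key, &addr); err != nil {
		return nil, err
	}
	return addr.Spec.Addresses, nil
}
```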
items: description: EndpointPort is a tuple that describes a single port. @@ -116,8 +164,9 @@ spec: route. This is used when the route is created from external sources. properties: kubernetes: - description: Kubernetes contains the information about the Kubernetes - source. + description: |- + Kubernetes contains the information about the Kubernetes source. + This field is automatically populated by the KubeLB CCM and in most cases, users should not set this field manually. properties: referenceGrants: description: |- @@ -837,6 +886,378 @@ spec: type: object status: description: RouteStatus defines the observed state of the Route. + properties: + resources: + description: Resources contains the list of resources that are created/processed + as a result of the Route. + properties: + referenceGrants: + additionalProperties: + properties: + apiVersion: + description: APIVersion is the API version of the resource. + type: string + conditions: + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the + field path .status.conditions. For example,\n\n\n\ttype + FooStatus struct{\n\t // Represents the observations + of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + generatedName: + description: GeneratedName is the generated name of the + resource. + type: string + kind: + description: Kind is the kind of the resource. + type: string + name: + description: Name is the name of the resource. + type: string + namespace: + description: Namespace is the namespace of the resource. + type: string + status: + description: Status is the actual status of the resource. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: object + route: + properties: + apiVersion: + description: APIVersion is the API version of the resource. + type: string + conditions: + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the field + path .status.conditions. For example,\n\n\n\ttype FooStatus + struct{\n\t // Represents the observations of a foo's + current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t Conditions + []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" + patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
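The condition blocks repeated throughout this schema are the standard metav1.Condition type, so the usual apimachinery helpers apply. For example, checking the route-level condition added in route_types.go:

```go
import (
	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
	"k8s.io/apimachinery/pkg/api/meta"
)

// routeApplied reports whether the Route resource itself was applied
// successfully, using the ConditionResourceAppliedSuccessfully type.
func routeApplied(route *kubelbv1alpha1.Route) bool {
	return meta.IsStatusConditionTrue(
		route.Status.Resources.Route.Conditions,
		kubelbv1alpha1.ConditionResourceAppliedSuccessfully.String(),
	)
}
```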
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + generatedName: + description: GeneratedName is the generated name of the resource. + type: string + kind: + description: Kind is the kind of the resource. + type: string + name: + description: Name is the name of the resource. + type: string + namespace: + description: Namespace is the namespace of the resource. + type: string + status: + description: Status is the actual status of the resource. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + services: + additionalProperties: + properties: + apiVersion: + description: APIVersion is the API version of the resource. + type: string + conditions: + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the + field path .status.conditions. For example,\n\n\n\ttype + FooStatus struct{\n\t // Represents the observations + of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + generatedName: + description: GeneratedName is the generated name of the + resource. + type: string + kind: + description: Kind is the kind of the resource. + type: string + name: + description: Name is the name of the resource. + type: string + namespace: + description: Namespace is the namespace of the resource. + type: string + ports: + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this + service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. 
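The services map pairs a ResourceState with the ServicePort list of each bridged Service. A sketch of filling one entry; keying the map by namespace/name is an assumption, and the controller may use a different scheme:

```go
import (
	corev1 "k8s.io/api/core/v1"

	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
)

// recordServiceStatus stores the ports of a processed Service in the Route
// status so tenants can discover the exposed ports.
func recordServiceStatus(status *kubelbv1alpha1.RouteResourcesStatus, svc *corev1.Service) {
	if status.Services == nil {
		status.Services = map[string]kubelbv1alpha1.RouteServiceStatus{}
	}
	status.Services[svc.Namespace+"/"+svc.Name] = kubelbv1alpha1.RouteServiceStatus{
		ResourceState: kubelbv1alpha1.ResourceState{
			APIVersion: "v1",
			Kind:       "Service",
			Name:       svc.Name,
			Namespace:  svc.Namespace,
		},
		Ports: svc.Spec.Ports,
	}
}
```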
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + status: + description: Status is the actual status of the resource. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: object + source: + type: string + type: object type: object type: object served: true diff --git a/cmd/ccm/main.go b/cmd/ccm/main.go index f59b8c4..b72a4e3 100644 --- a/cmd/ccm/main.go +++ b/cmd/ccm/main.go @@ -27,9 +27,8 @@ import ( "go.uber.org/zap/zapcore" - kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" "k8c.io/kubelb/internal/controllers/ccm" - "k8c.io/kubelb/internal/kubelb" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -57,7 +56,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(kubelbk8ciov1alpha1.AddToScheme(scheme)) + utilruntime.Must(kubelbv1alpha1.AddToScheme(scheme)) utilruntime.Must(gwapiv1a2.Install(scheme)) utilruntime.Must(gwapiv1.Install(scheme)) @@ -120,11 +119,6 @@ func main() { setupLog.V(1).Info("using endpoint address", "type", endpointAddressType) - sharedEndpoints := kubelb.Endpoints{ - ClusterEndpoints: []string{}, - EndpointAddressType: endpointAddressType, - } - // setup signal handler ctx := ctrl.SetupSignalHandler() @@ -145,12 +139,12 @@ func main() { Scheme: scheme, Cache: cache.Options{ ByObject: map[client.Object]cache.ByObject{ - &kubelbk8ciov1alpha1.LoadBalancer{}: { + &kubelbv1alpha1.LoadBalancer{}: { Namespaces: map[string]cache.Config{ clusterName: {}, }, }, - &kubelbk8ciov1alpha1.Route{}: { + &kubelbv1alpha1.Route{}: { Namespaces: map[string]cache.Config{ clusterName: {}, }, @@ -182,11 +176,12 @@ func main() { } if err = (&ccm.KubeLBNodeReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("kubelb.node.reconciler"), - Scheme: mgr.GetScheme(), - KubeLBClient: kubeLBMgr.GetClient(), - Endpoints: &sharedEndpoints, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("kubelb.node.reconciler"), + Scheme: mgr.GetScheme(), + KubeLBClient: kubeLBMgr.GetClient(), + EndpointAddressType: endpointAddressType, + ClusterName: clusterName, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "reconciler", "kubelb.node.reconciler") os.Exit(1) @@ -199,7 +194,6 @@ func main() { Scheme: mgr.GetScheme(), CloudController: enableCloudController, UseLoadbalancerClass: useLoadbalancerClass, - Endpoints: &sharedEndpoints, ClusterName: clusterName, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "reconciler", "kubelb.service.reconciler") diff --git a/cmd/kubelb/main.go b/cmd/kubelb/main.go index 79fde02..6579d14 100644 --- a/cmd/kubelb/main.go +++ b/cmd/kubelb/main.go @@ -22,7 +22,7 @@ import ( "go.uber.org/zap/zapcore" - kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" "k8c.io/kubelb/internal/config" "k8c.io/kubelb/internal/controllers/kubelb" "k8c.io/kubelb/internal/envoy" @@ -55,7 +55,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(kubelbk8ciov1alpha1.AddToScheme(scheme)) + utilruntime.Must(kubelbv1alpha1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -132,22 +132,17 @@ func main() { setupLog.Error(err, "unable to load controller config") os.Exit(1) } - // For Global topology, we need to 
ensure that the port lookup table exists. If it doesn't, we create it since it's managed by this controller. - var portAllocator *portlookup.PortAllocator - if kubelb.EnvoyProxyTopology(config.GetEnvoyProxyTopology()) == kubelb.EnvoyProxyTopologyGlobal { - portAllocator = portlookup.NewPortAllocator() - if err := portAllocator.LoadState(ctx, mgr.GetAPIReader()); err != nil { - setupLog.Error(err, ("unable to load port lookup state")) - os.Exit(1) - } + portAllocator := portlookup.NewPortAllocator() + if err := portAllocator.LoadState(ctx, mgr.GetAPIReader()); err != nil { + setupLog.Error(err, ("unable to load port lookup state")) + os.Exit(1) } if err = (&kubelb.LoadBalancerReconciler{ Client: mgr.GetClient(), Cache: mgr.GetCache(), Scheme: mgr.GetScheme(), - EnvoyBootstrap: envoyServer.GenerateBootstrap(), Namespace: opt.namespace, EnvoyProxyTopology: kubelb.EnvoyProxyTopology(config.GetEnvoyProxyTopology()), PortAllocator: portAllocator, @@ -170,11 +165,25 @@ func main() { EnvoyCache: envoyServer.Cache, EnvoyProxyTopology: kubelb.EnvoyProxyTopology(config.GetEnvoyProxyTopology()), PortAllocator: portAllocator, + Namespace: opt.namespace, + EnvoyBootstrap: envoyServer.GenerateBootstrap(), }).SetupWithManager(ctx, envoyMgr); err != nil { setupLog.Error(err, "unable to create envoy control-plane controller", "controller", "LoadBalancer") os.Exit(1) } + if err = (&kubelb.RouteReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName(kubelb.RouteControllerName), + Recorder: mgr.GetEventRecorderFor(kubelb.RouteControllerName), + EnvoyProxyTopology: kubelb.EnvoyProxyTopology(config.GetEnvoyProxyTopology()), + PortAllocator: portAllocator, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", kubelb.RouteControllerName) + os.Exit(1) + } + go func() { setupLog.Info("starting kubelb envoy manager") diff --git a/config/ccm/rbac/role.yaml b/config/ccm/rbac/role.yaml index b97ad6a..ee59f11 100644 --- a/config/ccm/rbac/role.yaml +++ b/config/ccm/rbac/role.yaml @@ -31,6 +31,24 @@ rules: - get - patch - update +- apiGroups: + - kubelb.k8c.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - kubelb.k8c.io + resources: + - routes/status + verbs: + - get - apiGroups: - networking.k8s.io resources: diff --git a/config/crd/bases/kubelb.k8c.io_addresses.yaml b/config/crd/bases/kubelb.k8c.io_addresses.yaml new file mode 100644 index 0000000..7be50ad --- /dev/null +++ b/config/crd/bases/kubelb.k8c.io_addresses.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: addresses.kubelb.k8c.io +spec: + group: kubelb.k8c.io + names: + kind: Addresses + listKind: AddressesList + plural: addresses + singular: addresses + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Addresses is the Schema for the addresses API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
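The wiring above registers the new RouteReconciler alongside the existing controllers and grants the CCM RBAC on routes and routes/status. A minimal SetupWithManager compatible with the r.SetupWithManager(mgr) call would look roughly like the following; the actual controller likely watches more than Route objects, and its Reconcile method is elided here:

```go
import (
	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// SetupWithManager registers the reconciler for Route objects. This is a
// simplified sketch, not the project's actual implementation.
func (r *RouteReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&kubelbv1alpha1.Route{}).
		Complete(r) // requires r to implement reconcile.Reconciler
}
```

Also visible in this hunk: the port allocator is now created and its state loaded unconditionally instead of only for the Global Envoy topology, presumably because the new Route controller consumes it in every topology.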
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AddressesSpec defines the desired state of Addresses + properties: + addresses: + description: Addresses contains a list of addresses. + items: + description: EndpointAddress is a tuple that describes single IP + address. + properties: + hostname: + description: The Hostname of this endpoint + type: string + ip: + description: |- + The IP of this endpoint. + May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + or link-local multicast ((224.0.0.0/24). + minLength: 7 + type: string + required: + - ip + type: object + minItems: 1 + type: array + type: object + status: + description: AddressesStatus defines the observed state of Addresses + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/kubelb.k8c.io_configs.yaml b/config/crd/bases/kubelb.k8c.io_configs.yaml index 6d8128d..ecfc8a2 100644 --- a/config/crd/bases/kubelb.k8c.io_configs.yaml +++ b/config/crd/bases/kubelb.k8c.io_configs.yaml @@ -1102,6 +1102,11 @@ spec: If set to true, Replicas will be ignored. type: boolean type: object + ingressClassName: + description: |- + IngressClassName is the name of the IngressClass that will be used for the routes created by KubeLB. If not specified, KubeLB will replace the IngressClassName + with an empty value in the Ingress resource which would result in the default IngressClass being used. + type: string propagateAllAnnotations: description: PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, diff --git a/config/crd/bases/kubelb.k8c.io_loadbalancers.yaml b/config/crd/bases/kubelb.k8c.io_loadbalancers.yaml index f8ed44a..34f6da8 100644 --- a/config/crd/bases/kubelb.k8c.io_loadbalancers.yaml +++ b/config/crd/bases/kubelb.k8c.io_loadbalancers.yaml @@ -77,11 +77,59 @@ spec: type: object minItems: 1 type: array + addressesReference: + description: |- + AddressesReference is a reference to the Addresses object that contains the IP addresses. + If this field is set, the Addresses field will be ignored. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic name: description: Name is the name of the endpoints. type: string ports: - description: Port numbers available on the related IP addresses. + description: |- + Port numbers available on the related IP addresses. + This field is ignored for routes that are using kubernetes resources as the source. items: description: EndpointPort is a tuple that describes a single port. diff --git a/config/crd/bases/kubelb.k8c.io_routes.yaml b/config/crd/bases/kubelb.k8c.io_routes.yaml index 0ca1cb8..7218433 100644 --- a/config/crd/bases/kubelb.k8c.io_routes.yaml +++ b/config/crd/bases/kubelb.k8c.io_routes.yaml @@ -75,11 +75,59 @@ spec: type: object minItems: 1 type: array + addressesReference: + description: |- + AddressesReference is a reference to the Addresses object that contains the IP addresses. + If this field is set, the Addresses field will be ignored. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic name: description: Name is the name of the endpoints. type: string ports: - description: Port numbers available on the related IP addresses. + description: |- + Port numbers available on the related IP addresses. + This field is ignored for routes that are using kubernetes resources as the source. items: description: EndpointPort is a tuple that describes a single port. 
@@ -116,8 +164,9 @@ spec: route. This is used when the route is created from external sources. properties: kubernetes: - description: Kubernetes contains the information about the Kubernetes - source. + description: |- + Kubernetes contains the information about the Kubernetes source. + This field is automatically populated by the KubeLB CCM and in most cases, users should not set this field manually. properties: referenceGrants: description: |- @@ -837,6 +886,378 @@ spec: type: object status: description: RouteStatus defines the observed state of the Route. + properties: + resources: + description: Resources contains the list of resources that are created/processed + as a result of the Route. + properties: + referenceGrants: + additionalProperties: + properties: + apiVersion: + description: APIVersion is the API version of the resource. + type: string + conditions: + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the + field path .status.conditions. For example,\n\n\n\ttype + FooStatus struct{\n\t // Represents the observations + of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
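Since these status fields live under a dedicated subresource (the Addresses CRD above declares subresources: status: {}, and the Route CRD is assumed to do the same), a controller persists them through the status client rather than a regular update:

```go
import (
	"context"

	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// persistRouteStatus writes .status.resources through the status
// subresource, leaving .spec untouched.
func persistRouteStatus(ctx context.Context, c client.Client, route *kubelbv1alpha1.Route, resources kubelbv1alpha1.RouteResourcesStatus) error {
	route.Status.Resources = resources
	return c.Status().Update(ctx, route)
}
```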
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + generatedName: + description: GeneratedName is the generated name of the + resource. + type: string + kind: + description: Kind is the kind of the resource. + type: string + name: + description: Name is the name of the resource. + type: string + namespace: + description: Namespace is the namespace of the resource. + type: string + status: + description: Status is the actual status of the resource. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: object + route: + properties: + apiVersion: + description: APIVersion is the API version of the resource. + type: string + conditions: + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the field + path .status.conditions. For example,\n\n\n\ttype FooStatus + struct{\n\t // Represents the observations of a foo's + current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t Conditions + []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" + patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + generatedName: + description: GeneratedName is the generated name of the resource. + type: string + kind: + description: Kind is the kind of the resource. + type: string + name: + description: Name is the name of the resource. + type: string + namespace: + description: Namespace is the namespace of the resource. + type: string + status: + description: Status is the actual status of the resource. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + services: + additionalProperties: + properties: + apiVersion: + description: APIVersion is the API version of the resource. + type: string + conditions: + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the + field path .status.conditions. For example,\n\n\n\ttype + FooStatus struct{\n\t // Represents the observations + of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + generatedName: + description: GeneratedName is the generated name of the + resource. + type: string + kind: + description: Kind is the kind of the resource. + type: string + name: + description: Name is the name of the resource. + type: string + namespace: + description: Namespace is the namespace of the resource. + type: string + ports: + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this + service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. 
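Editor's aside: the port/targetPort semantics documented above are easiest to see in code. A hedged sketch using only the upstream corev1 and intstr types, nothing KubeLB-specific:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Named targetPort: the API resolves "http" against the target Pod's container ports.
	byName := corev1.ServicePort{Name: "web", Port: 80, TargetPort: intstr.FromString("http")}
	// targetPort omitted: the API server applies the identity mapping, i.e. targetPort = port.
	identity := corev1.ServicePort{Name: "metrics", Port: 9090}
	fmt.Println(byName.TargetPort.String(), identity.Port) // http 9090
}

The generated schema continues below.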
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + status: + description: Status is the actual status of the resource. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: object + source: + type: string + type: object type: object type: object served: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 1ebdd77..50f7d33 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,6 +5,7 @@ resources: - bases/kubelb.k8c.io_loadbalancers.yaml - bases/kubelb.k8c.io_configs.yaml - bases/kubelb.k8c.io_routes.yaml +- bases/kubelb.k8c.io_addresses.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: diff --git a/config/deploy/kubelb/kustomization.yaml b/config/deploy/kubelb/kustomization.yaml index 9239044..c12421e 100644 --- a/config/deploy/kubelb/kustomization.yaml +++ b/config/deploy/kubelb/kustomization.yaml @@ -6,4 +6,4 @@ resources: images: - name: controller newName: quay.io/kubermatic/kubelb-manager - newTag: v1.0.0 + newTag: v1.0.0 \ No newline at end of file diff --git a/config/kubelb/manager.yaml b/config/kubelb/manager.yaml index 1880605..5a7710a 100644 --- a/config/kubelb/manager.yaml +++ b/config/kubelb/manager.yaml @@ -65,6 +65,7 @@ spec: - name: kubelb args: - --enable-leader-election + - --debug=true image: controller:latest env: - name: NAMESPACE diff --git a/config/kubelb/rbac/role.yaml b/config/kubelb/rbac/role.yaml index 3be3e89..124fb35 100644 --- a/config/kubelb/rbac/role.yaml +++ b/config/kubelb/rbac/role.yaml @@ -48,6 +48,24 @@ rules: - patch - update - watch +- apiGroups: + - kubelb.k8c.io + resources: + - addresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - kubelb.k8c.io + resources: + - addresses/status + verbs: + - get - apiGroups: - kubelb.k8c.io resources: @@ -84,3 +102,43 @@ rules: - get - patch - update +- apiGroups: + - kubelb.k8c.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - kubelb.k8c.io + resources: + - routes/status + verbs: + - get + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - get + - patch + - update diff --git a/go.mod b/go.mod index ab8d254..d91bb41 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module k8c.io/kubelb go 1.22.0 -toolchain go1.22.3 +toolchain go1.22.5 require ( github.com/envoyproxy/go-control-plane v0.12.0 @@ -13,19 +13,22 @@ require ( github.com/onsi/gomega v1.33.1 github.com/pkg/errors v0.9.1 go.uber.org/zap v1.27.0 - google.golang.org/grpc v1.64.1 - google.golang.org/protobuf v1.34.1 - k8s.io/api v0.30.1 - k8s.io/apimachinery v0.30.1 - k8s.io/client-go v0.30.1 - k8s.io/code-generator v0.30.1 - k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 - sigs.k8s.io/controller-runtime v0.18.3 + google.golang.org/grpc v1.65.0 + google.golang.org/protobuf v1.34.2 + k8c.io/reconciler v0.5.0 + k8s.io/api v0.30.2 + k8s.io/apimachinery v0.30.2 + k8s.io/client-go v0.30.2 + k8s.io/code-generator v0.30.2 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/gateway-api v1.1.0 + sigs.k8s.io/yaml v1.4.0 ) require ( cel.dev/expr v0.15.0 // indirect + 
github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -43,7 +46,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea // indirect github.com/google/uuid v1.6.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -52,35 +55,34 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nxadm/tail v1.4.8 // indirect + github.com/nxadm/tail v1.4.11 // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.53.0 // indirect - github.com/prometheus/procfs v0.15.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240707233637-46b078467d37 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + golang.org/x/tools v0.23.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/apiextensions-apiserver v0.30.2 // indirect k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index d66bb12..f3051d2 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= cel.dev/expr 
v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= @@ -23,6 +25,7 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -63,8 +66,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea h1:VcIYpAGBae3Z6BVncE0OnTE/ZjlDXqtYhOZky88neLM= +github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -92,8 +95,9 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ -112,14 +116,14 @@ github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQ github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 
h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= -github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= @@ -136,22 +140,22 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 h1:vpzMC/iZhYFAjJzHU0Cfuq+w1vLLsF2vLkDrPjzKYck= -golang.org/x/exp v0.0.0-20240529005216-23cca8864a10/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20240707233637-46b078467d37 h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w= +golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -167,10 +171,11 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= @@ -182,28 +187,28 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= +google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d h1:JU0iKnSg02Gmb5ZdV8nYsKEKsP6o/FGVWTrw4i1DA9A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -220,26 +225,28 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= -k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= -k8s.io/apiextensions-apiserver v0.30.1 
h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= -k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= -k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= -k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= -k8s.io/code-generator v0.30.1 h1:ZsG++q5Vt0ScmKCeLhynUuWgcwFGg1Hl1AGfatqPJBI= -k8s.io/code-generator v0.30.1/go.mod h1:hFgxRsvOUg79mbpbVKfjJvRhVz1qLoe40yZDJ/hwRH4= +k8c.io/reconciler v0.5.0 h1:BHpelg1UfI/7oBFctqOq8sX6qzflXpl3SlvHe7e8wak= +k8c.io/reconciler v0.5.0/go.mod h1:pT1+SVcVXJQeBJhpJBXQ5XW64QnKKeYTnVlQf0dGE0k= +k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI= +k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI= +k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE= +k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= +k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg= +k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= +k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= +k8s.io/code-generator v0.30.2 h1:ZY1+aGkqZVwKIyGsOzquaeZ5rSfE6wZHur8z3jQAaiw= +k8s.io/code-generator v0.30.2/go.mod h1:RQP5L67QxqgkVquk704CyvWFIq0e6RCMmLTXxjE8dVA= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= -k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.18.3 h1:B5Wmmo8WMWK7izei+2LlXLVDGzMwAHBNLX68lwtlSR4= -sigs.k8s.io/controller-runtime v0.18.3/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f h1:2sXuKesAYbRHxL3aE2PN6zX/gcJr22cjrsej+W784Tc= +k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd 
h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/hack/ci/e2e/tests/basic_test.go b/hack/ci/e2e/tests/basic_test.go index b12eb7c..40f42e8 100644 --- a/hack/ci/e2e/tests/basic_test.go +++ b/hack/ci/e2e/tests/basic_test.go @@ -70,8 +70,13 @@ func TestSimpleService(t *testing.T) { lb := v1alpha1.LoadBalancer{} Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant1", Name: string(svc.UID)}, &lb)).To(Succeed()) Expect(len(lb.Spec.Endpoints)).To(Equal(1)) - Expect(len(lb.Spec.Endpoints[0].Addresses)).To(Equal(1)) Expect(len(lb.Spec.Endpoints[0].Ports)).To(Equal(1)) + + Expect(lb.Spec.Endpoints[0].AddressesReference).ToNot(BeNil()) + // Retrieve the endpoint addresses and make sure they are correct + addresses := v1alpha1.Addresses{} + Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant1", Name: lb.Spec.Endpoints[0].AddressesReference.Name}, &addresses)).To(Succeed()) + Expect(len(addresses.Spec.Addresses)).To(Equal(1)) } func TestMultiNodeService(t *testing.T) { @@ -92,8 +97,13 @@ func TestMultiNodeService(t *testing.T) { lb := v1alpha1.LoadBalancer{} Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: string(svc.UID)}, &lb)).To(Succeed()) Expect(len(lb.Spec.Endpoints)).To(Equal(1)) - Expect(len(lb.Spec.Endpoints[0].Addresses)).To(Equal(4)) Expect(len(lb.Spec.Endpoints[0].Ports)).To(Equal(1)) + + Expect(lb.Spec.Endpoints[0].AddressesReference).ToNot(BeNil()) + // Retrieve the endpoint addresses and make sure they are correct + addresses := v1alpha1.Addresses{} + Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: lb.Spec.Endpoints[0].AddressesReference.Name}, &addresses)).To(Succeed()) + Expect(len(addresses.Spec.Addresses)).To(Equal(4)) } func TestMultiPortService(t *testing.T) { @@ -115,8 +125,13 @@ func TestMultiPortService(t *testing.T) { lb := v1alpha1.LoadBalancer{} Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant1", Name: string(svc.UID)}, &lb)).To(Succeed()) Expect(len(lb.Spec.Endpoints)).To(Equal(1)) - Expect(len(lb.Spec.Endpoints[0].Addresses)).To(Equal(1)) Expect(len(lb.Spec.Endpoints[0].Ports)).To(Equal(2)) + + Expect(lb.Spec.Endpoints[0].AddressesReference).ToNot(BeNil()) + // Retrieve the endpoint addresses and make sure they are correct + addresses := v1alpha1.Addresses{} + Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant1", Name: lb.Spec.Endpoints[0].AddressesReference.Name}, &addresses)).To(Succeed()) + Expect(len(addresses.Spec.Addresses)).To(Equal(1)) } func TestMultiPortMultiNodeService(t *testing.T) { @@ -138,8 +153,13 @@ func TestMultiPortMultiNodeService(t *testing.T) { lb := v1alpha1.LoadBalancer{} Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: string(svc.UID)}, &lb)).To(Succeed()) Expect(len(lb.Spec.Endpoints)).To(Equal(1)) - Expect(len(lb.Spec.Endpoints[0].Addresses)).To(Equal(4)) Expect(len(lb.Spec.Endpoints[0].Ports)).To(Equal(2)) + + Expect(lb.Spec.Endpoints[0].AddressesReference).ToNot(BeNil()) + // Retrieve the endpoint addresses and make sure they are correct + addresses := v1alpha1.Addresses{} + Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: lb.Spec.Endpoints[0].AddressesReference.Name}, &addresses)).To(Succeed()) + Expect(len(addresses.Spec.Addresses)).To(Equal(4)) } func TestDuplicateService(t *testing.T) { @@ -172,14 +192,24 @@ func TestDuplicateService(t *testing.T) { lb1 := v1alpha1.LoadBalancer{} Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant1", Name: string(svc1.UID)}, &lb1)).To(Succeed()) Expect(len(lb1.Spec.Endpoints)).To(Equal(1)) - Expect(len(lb1.Spec.Endpoints[0].Addresses)).To(Equal(1)) Expect(len(lb1.Spec.Endpoints[0].Ports)).To(Equal(2)) + Expect(lb1.Spec.Endpoints[0].AddressesReference).ToNot(BeNil()) + // Retrieve the endpoint addresses and make sure they are correct + addresses := v1alpha1.Addresses{} + Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant1", Name: lb1.Spec.Endpoints[0].AddressesReference.Name}, &addresses)).To(Succeed()) + Expect(len(addresses.Spec.Addresses)).To(Equal(1)) + lb2 := v1alpha1.LoadBalancer{} Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: string(svc2.UID)}, &lb2)).To(Succeed()) Expect(len(lb2.Spec.Endpoints)).To(Equal(1)) - Expect(len(lb2.Spec.Endpoints[0].Addresses)).To(Equal(4)) Expect(len(lb2.Spec.Endpoints[0].Ports)).To(Equal(2)) + + Expect(lb2.Spec.Endpoints[0].AddressesReference).ToNot(BeNil()) + // Retrieve the endpoint addresses and make sure they are correct + addresses = v1alpha1.Addresses{} + Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: lb2.Spec.Endpoints[0].AddressesReference.Name}, &addresses)).To(Succeed()) + Expect(len(addresses.Spec.Addresses)).To(Equal(4)) } func TestMultipleServices(t *testing.T) { @@ -224,12 +254,22 @@ func TestMultipleServices(t *testing.T) { lb1 := v1alpha1.LoadBalancer{} Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: string(svc1.UID)}, &lb1)).To(Succeed()) Expect(len(lb1.Spec.Endpoints)).To(Equal(1)) - Expect(len(lb1.Spec.Endpoints[0].Addresses)).To(Equal(4)) Expect(len(lb1.Spec.Endpoints[0].Ports)).To(Equal(2)) + Expect(lb1.Spec.Endpoints[0].AddressesReference).ToNot(BeNil()) + // Retrieve the endpoint addresses and make sure they are correct + addresses := v1alpha1.Addresses{} + Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: lb1.Spec.Endpoints[0].AddressesReference.Name}, &addresses)).To(Succeed()) + Expect(len(addresses.Spec.Addresses)).To(Equal(4)) + lb2 := v1alpha1.LoadBalancer{} Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: string(svc2.UID)}, &lb2)).To(Succeed()) Expect(len(lb2.Spec.Endpoints)).To(Equal(1)) - Expect(len(lb2.Spec.Endpoints[0].Addresses)).To(Equal(4)) Expect(len(lb2.Spec.Endpoints[0].Ports)).To(Equal(2)) + + Expect(lb2.Spec.Endpoints[0].AddressesReference).ToNot(BeNil()) + // Retrieve the endpoint addresses and make sure they are correct + addresses = v1alpha1.Addresses{} + Expect(kubelbK8sClient.Get(ctx, types.NamespacedName{Namespace: "cluster-tenant2", Name: lb2.Spec.Endpoints[0].AddressesReference.Name}, &addresses)).To(Succeed()) + Expect(len(addresses.Spec.Addresses)).To(Equal(4)) } diff --git a/hack/reconciling.yaml b/hack/reconciling.yaml deleted file mode 100644 index 0206787..0000000 --- a/hack/reconciling.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2024 The KubeLB Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This defines the reconciling helpers we generate using -# https://github.com/kubermatic/reconciler - -package: reconciling -boilerplate: hack/boilerplate/boilerplate.go.txt -resourceTypes: {} diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index c0f0f68..f3d9366 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -49,4 +49,4 @@ rm -r "${SCRIPT_ROOT}"/${OUTPUT_PKG}/* mv "${SCRIPT_ROOT}"/hack/${MODULE}/${OUTPUT_PKG}/* "${SCRIPT_ROOT}"/${OUTPUT_PKG}/ -rm -r "${SCRIPT_ROOT}"/hack/k8c.io +rm -r "${SCRIPT_ROOT}"/hack/k8c.io \ No newline at end of file diff --git a/internal/config/config.go b/internal/config/config.go index 6fa6eca..afb5a83 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -54,3 +54,7 @@ func SetConfig(conf v1alpha1.Config) { func GetEnvoyProxyTopology() v1alpha1.EnvoyProxyTopology { return config.Spec.EnvoyProxy.Topology } + +func IsGlobalTopology() bool { + return GetEnvoyProxyTopology() == v1alpha1.EnvoyProxyTopologyGlobal +} diff --git a/internal/controllers/ccm/ingress_controller.go b/internal/controllers/ccm/ingress_controller.go index bf9869c..d029be6 100644 --- a/internal/controllers/ccm/ingress_controller.go +++ b/internal/controllers/ccm/ingress_controller.go @@ -18,12 +18,16 @@ package ccm import ( "context" + "encoding/json" "fmt" "reflect" "github.com/go-logr/logr" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + "k8c.io/kubelb/internal/kubelb" kuberneteshelper "k8c.io/kubelb/internal/kubernetes" + ingressHelpers "k8c.io/kubelb/internal/resources/ingress" serviceHelpers "k8c.io/kubelb/internal/resources/service" corev1 "k8s.io/api/core/v1" @@ -32,12 +36,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" ) const ( @@ -60,6 +67,8 @@ type IngressReconciler struct { // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;patch // +kubebuilder:rbac:groups="",resources=services/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=kubelb.k8c.io,resources=routes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=kubelb.k8c.io,resources=routes/status,verbs=get // +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch func (r *IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -106,13 +115,79 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct func (r *IngressReconciler) reconcile(ctx context.Context, log logr.Logger, ingress *networkingv1.Ingress) error { // We need to traverse the Ingress, find all the services associated with it, create/update the corresponding Route in LB cluster. 
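// Editor's note: ingressHelpers.GetServicesFromIngress (used just below) is assumed to be a
// shared-package move of the getServicesFromSource method removed later in this hunk; a
// minimal sketch of that helper, with one nil guard added on rule.HTTP that the removed
// code did not have:
//
//	func GetServicesFromIngress(ingress networkingv1.Ingress) []types.NamespacedName {
//		serviceReferences := make([]types.NamespacedName, 0)
//		for _, rule := range ingress.Spec.Rules {
//			if rule.HTTP == nil { // rule.HTTP may be nil per the networking/v1 API
//				continue
//			}
//			for _, path := range rule.HTTP.Paths {
//				serviceReferences = append(serviceReferences, types.NamespacedName{
//					Name:      path.Backend.Service.Name,
//					Namespace: ingress.Namespace,
//				})
//			}
//		}
//		if ingress.Spec.DefaultBackend != nil && ingress.Spec.DefaultBackend.Service != nil {
//			serviceReferences = append(serviceReferences, types.NamespacedName{
//				Name:      ingress.Spec.DefaultBackend.Service.Name,
//				Namespace: ingress.Namespace,
//			})
//		}
//		return serviceReferences
//	}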
- originalServices := r.getServicesFromSource(ingress) - return reconcileSourceForRoute(ctx, log, r.Client, r.LBClient, ingress, originalServices, nil, r.ClusterName) + originalServices := ingressHelpers.GetServicesFromIngress(*ingress) + err := reconcileSourceForRoute(ctx, log, r.Client, r.LBClient, ingress, originalServices, nil, r.ClusterName) + if err != nil { + return fmt.Errorf("failed to reconcile source for route: %w", err) + } + + // Route was reconciled successfully, now we need to update the status of the Ingress. + route := kubelbv1alpha1.Route{} + err = r.LBClient.Get(ctx, types.NamespacedName{Name: string(ingress.UID), Namespace: r.ClusterName}, &route) + if err != nil { + return fmt.Errorf("failed to get Route from LB cluster: %w", err) + } + + // Update the status of the Ingress + if len(route.Status.Resources.Route.GeneratedName) > 0 { + // First we need to ensure that status is available in the Route + resourceStatus := route.Status.Resources.Route.Status + jsonData, err := json.Marshal(resourceStatus.Raw) + if err != nil || string(jsonData) == kubelb.DefaultRouteStatus { + // Status is not available in the Route, so we need to wait for it + return nil + } + + // Convert rawExtension to networkingv1.IngressStatus + status := networkingv1.IngressStatus{} + if err := yaml.UnmarshalStrict(resourceStatus.Raw, &status); err != nil { + return fmt.Errorf("failed to unmarshal Ingress status: %w", err) + } + + log.V(3).Info("updating Ingress status", "name", ingress.Name, "namespace", ingress.Namespace) + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + if err := r.Get(ctx, types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace}, ingress); err != nil { + return err + } + original := ingress.DeepCopy() + ingress.Status = status + if reflect.DeepEqual(original.Status, ingress.Status) { + return nil + } + // update the status + return r.Status().Patch(ctx, ingress, ctrlclient.MergeFrom(original)) + }) + } + return nil } func (r *IngressReconciler) cleanup(ctx context.Context, ingress *networkingv1.Ingress) (ctrl.Result, error) { + impactedServices := ingressHelpers.GetServicesFromIngress(*ingress) + services := corev1.ServiceList{} + err := r.List(ctx, &services, ctrlclient.InNamespace(ingress.Namespace), ctrlclient.MatchingLabels{kubelb.LabelManagedBy: kubelb.LabelControllerName}) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to list services: %w", err) + } + + // Delete services created by the controller. + for _, service := range services.Items { + originalName := service.Name + if service.Labels[kubelb.LabelOriginName] != "" { + originalName = service.Labels[kubelb.LabelOriginName] + } + + for _, serviceRef := range impactedServices { + if serviceRef.Name == originalName && serviceRef.Namespace == service.Namespace { + err := r.Delete(ctx, &service) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to delete service: %w", err) + } + } + } + } + // Find the Route in LB cluster and delete it - err := cleanupRoute(ctx, r.LBClient, string(ingress.UID), ingress.Namespace) + err = cleanupRoute(ctx, r.LBClient, string(ingress.UID), r.ClusterName) if err != nil { return reconcile.Result{}, fmt.Errorf("failed to cleanup route: %w", err) } @@ -125,34 +200,11 @@ func (r *IngressReconciler) cleanup(ctx context.Context, ingress *networkingv1.I return reconcile.Result{}, nil } -// This method retrieves list of services from the Ingress and normalizes them. 
-func (r *IngressReconciler) getServicesFromSource(ingress *networkingv1.Ingress) []types.NamespacedName { - serviceReferences := make([]types.NamespacedName, 0) - for _, rule := range ingress.Spec.Rules { - for _, path := range rule.HTTP.Paths { - serviceReferences = append(serviceReferences, types.NamespacedName{ - Name: path.Backend.Service.Name, - Namespace: ingress.Namespace, - }) - } - } - - if ingress.Spec.DefaultBackend != nil && ingress.Spec.DefaultBackend.Service != nil { - serviceReferences = append(serviceReferences, types.NamespacedName{ - Name: ingress.Spec.DefaultBackend.Service.Name, - Namespace: ingress.Namespace, - }) - } - return serviceReferences -} - // enqueueIngresses is a handler.MapFunc to be used to enqeue requests for reconciliation // for Ingresses against the corresponding service. func (r *IngressReconciler) enqueueIngresses() handler.MapFunc { return func(_ context.Context, o ctrlclient.Object) []ctrl.Request { result := []reconcile.Request{} - - // TODO: We should use field indexers here to avoid listing all services ingressList := &networkingv1.IngressList{} if err := r.List(context.Background(), ingressList, ctrlclient.InNamespace(o.GetNamespace())); err != nil { return nil @@ -163,7 +215,7 @@ func (r *IngressReconciler) enqueueIngresses() handler.MapFunc { continue } - services := r.getServicesFromSource(&ingress) + services := ingressHelpers.GetServicesFromIngress(ingress) for _, serviceRef := range services { if (serviceRef.Name == o.GetName() || fmt.Sprintf(serviceHelpers.NodePortServicePattern, serviceRef.Name) == o.GetName()) && serviceRef.Namespace == o.GetNamespace() { result = append(result, reconcile.Request{ @@ -192,9 +244,7 @@ func (r *IngressReconciler) ingressFilter() predicate.Predicate { if !r.shouldReconcile(ingress) { return false } - oldIngress, _ := e.ObjectOld.(*networkingv1.Ingress) - return !reflect.DeepEqual(ingress.Spec, oldIngress.Spec) || !reflect.DeepEqual(ingress.Labels, oldIngress.Labels) || - !reflect.DeepEqual(ingress.Annotations, oldIngress.Annotations) + return e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion() } return false }, @@ -222,8 +272,7 @@ func (r *IngressReconciler) shouldReconcile(ingress *networkingv1.Ingress) bool func (r *IngressReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&networkingv1.Ingress{}). - WithEventFilter(r.ingressFilter()). + For(&networkingv1.Ingress{}, builder.WithPredicates(r.ingressFilter())). 
Watches( &corev1.Service{}, handler.EnqueueRequestsFromMapFunc(r.enqueueIngresses()), diff --git a/internal/controllers/ccm/node_controller.go b/internal/controllers/ccm/node_controller.go index dc21f36..4d53a2a 100644 --- a/internal/controllers/ccm/node_controller.go +++ b/internal/controllers/ccm/node_controller.go @@ -18,26 +18,31 @@ package ccm import ( "context" + "reflect" "github.com/go-logr/logr" kubelbiov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" - "k8c.io/kubelb/internal/kubelb" corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // KubeLBNodeReconciler reconciles a Service object type KubeLBNodeReconciler struct { ctrlclient.Client - KubeLBClient ctrlclient.Client - Log logr.Logger - Scheme *runtime.Scheme - Endpoints *kubelb.Endpoints + KubeLBClient ctrlclient.Client + ClusterName string + Log logr.Logger + Scheme *runtime.Scheme + EndpointAddressType corev1.NodeAddressType } // +kubebuilder:rbac:groups="",resources=nodes,verbs=list;get;watch @@ -54,49 +59,70 @@ func (r *KubeLBNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } - log.V(6).Info("processing", "nodes", nodeList, "endpoints", r.Endpoints) - - if r.Endpoints.EndpointIsDesiredState(nodeList) { - log.V(2).Info("endpoints are in desired state") - return ctrl.Result{}, err + // Compute current state + currentAddresses := r.GenerateAddresses(nodeList) + + // Retrieve current state from the LB cluster + var addresses kubelbiov1alpha1.Addresses + if err = r.KubeLBClient.Get(ctx, types.NamespacedName{Name: kubelbiov1alpha1.DefaultAddressName, Namespace: r.ClusterName}, &addresses); err != nil { + if kerrors.IsNotFound(err) { + // Create the default address object + if err = r.KubeLBClient.Create(ctx, currentAddresses); err != nil { + log.Error(err, "unable to create addresses") + return ctrl.Result{}, err + } + } + return reconcile.Result{}, err } - log.V(6).Info("actual", "endpoints", r.Endpoints.ClusterEndpoints) - log.V(6).Info("desired", "endpoints", r.Endpoints.GetEndpoints(nodeList)) - - r.Endpoints.ClusterEndpoints = r.Endpoints.GetEndpoints(nodeList) - log.V(5).Info("proceeding with", "endpoints", r.Endpoints.ClusterEndpoints) + // Compare the current state with the desired state + if reflect.DeepEqual(addresses.Spec.Addresses, currentAddresses.Spec.Addresses) { + log.V(2).Info("addresses are in desired state") + return ctrl.Result{}, nil + } - // patch endpoints - var lbList kubelbiov1alpha1.LoadBalancerList - if err = r.KubeLBClient.List(ctx, &lbList); err != nil { - log.Error(err, "unable to list LoadBalancer") + // Update the addresses + addresses.Spec.Addresses = currentAddresses.Spec.Addresses + if err = r.KubeLBClient.Update(ctx, &addresses); err != nil { + log.Error(err, "unable to update addresses") return ctrl.Result{}, err } - log.V(6).Info("patching", "LoadBalancers", lbList) + return ctrl.Result{}, nil +} - var endpointAddresses []kubelbiov1alpha1.EndpointAddress - for _, endpoint := range r.Endpoints.ClusterEndpoints { - endpointAddresses = append(endpointAddresses, kubelbiov1alpha1.EndpointAddress{ +func (r *KubeLBNodeReconciler) GenerateAddresses(nodes *corev1.NodeList) *kubelbiov1alpha1.Addresses { + endpoints := r.getEndpoints(nodes) + var addresses 
[]kubelbiov1alpha1.EndpointAddress + for _, endpoint := range endpoints { + addresses = append(addresses, kubelbiov1alpha1.EndpointAddress{ IP: endpoint, }) } - for _, lb := range lbList.Items { - for _, endpoints := range lb.Spec.Endpoints { - endpoints.Addresses = endpointAddresses - } + return &kubelbiov1alpha1.Addresses{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubelbiov1alpha1.DefaultAddressName, + Namespace: r.ClusterName, + }, + Spec: kubelbiov1alpha1.AddressesSpec{ + Addresses: addresses, + }, + } +} - if err = r.KubeLBClient.Update(ctx, &lb); err != nil { - log.Error(err, "unable to update", "LoadBalancer", lb.Name) +func (r *KubeLBNodeReconciler) getEndpoints(nodes *corev1.NodeList) []string { + var clusterEndpoints []string + for _, node := range nodes.Items { + var internalIP string + for _, address := range node.Status.Addresses { + if address.Type == r.EndpointAddressType { + internalIP = address.Address + } } - - log.V(2).Info("updated", "LoadBalancer", lb.Name) - log.V(7).Info("updated to", "LoadBalancer", lb) + clusterEndpoints = append(clusterEndpoints, internalIP) } - - return ctrl.Result{}, nil + return clusterEndpoints } func (r *KubeLBNodeReconciler) SetupWithManager(mgr ctrl.Manager) error { diff --git a/internal/controllers/ccm/service_controller.go b/internal/controllers/ccm/service_controller.go index b8cdcd8..76e6118 100644 --- a/internal/controllers/ccm/service_controller.go +++ b/internal/controllers/ccm/service_controller.go @@ -23,7 +23,7 @@ import ( "github.com/go-logr/logr" - kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" utils "k8c.io/kubelb/internal/controllers" "k8c.io/kubelb/internal/kubelb" @@ -55,7 +55,6 @@ type KubeLBServiceReconciler struct { ClusterName string CloudController bool UseLoadbalancerClass bool - Endpoints *kubelb.Endpoints } // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;update;patch @@ -81,10 +80,7 @@ func (r *KubeLBServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - clusterEndpoints := r.getEndpoints(&service) - - log.V(6).Info("processing", "service", service) - log.V(5).Info("proceeding with", "endpoints", clusterEndpoints) + clusterEndpoints, useAddressesReference := r.getEndpoints(&service) // examine DeletionTimestamp to determine if object is under deletion if !service.ObjectMeta.DeletionTimestamp.IsZero() { @@ -110,12 +106,12 @@ func (r *KubeLBServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques log.V(5).Info("proceeding with", "endpoints", clusterEndpoints) - desiredLB := kubelb.MapLoadBalancer(&service, clusterEndpoints, r.ClusterName) + desiredLB := kubelb.MapLoadBalancer(&service, clusterEndpoints, useAddressesReference, r.ClusterName) log.V(6).Info("desired", "LoadBalancer", desiredLB) kubelbClient := r.KubeLBManager.GetClient() - var actualLB kubelbk8ciov1alpha1.LoadBalancer + var actualLB kubelbv1alpha1.LoadBalancer err = kubelbClient.Get(ctx, ctrlclient.ObjectKeyFromObject(desiredLB), &actualLB) log.V(6).Info("actual", "LoadBalancer", actualLB) @@ -174,7 +170,7 @@ func (r *KubeLBServiceReconciler) cleanupService(ctx context.Context, log logr.L return ctrl.Result{}, nil } - lb := &kubelbk8ciov1alpha1.LoadBalancer{ + lb := &kubelbv1alpha1.LoadBalancer{ ObjectMeta: metav1.ObjectMeta{ Name: string(service.UID), Namespace: r.ClusterName, @@ -202,8 +198,8 @@ func (r *KubeLBServiceReconciler) cleanupService(ctx context.Context, log logr.L return ctrl.Result{}, nil } -func 
(r *KubeLBServiceReconciler) enqueueLoadBalancer() handler.TypedMapFunc[*kubelbk8ciov1alpha1.LoadBalancer] { - return handler.TypedMapFunc[*kubelbk8ciov1alpha1.LoadBalancer](func(_ context.Context, lb *kubelbk8ciov1alpha1.LoadBalancer) []reconcile.Request { +func (r *KubeLBServiceReconciler) enqueueLoadBalancer() handler.TypedMapFunc[*kubelbv1alpha1.LoadBalancer] { + return handler.TypedMapFunc[*kubelbv1alpha1.LoadBalancer](func(_ context.Context, lb *kubelbv1alpha1.LoadBalancer) []reconcile.Request { if lb.GetNamespace() != r.ClusterName { return []reconcile.Request{} } @@ -214,14 +210,14 @@ func (r *KubeLBServiceReconciler) enqueueLoadBalancer() handler.TypedMapFunc[*ku originalNamespace, ok := lb.GetLabels()[kubelb.LabelOriginNamespace] if !ok || originalNamespace == "" { - r.Log.Error(fmt.Errorf("required label \"%s\" not found", kubelb.LabelOriginNamespace), fmt.Sprintf("failed to queue service for LoadBalacner: %s, could not determine origin namespace", lb.GetName())) + r.Log.Error(fmt.Errorf("required label \"%s\" not found", kubelb.LabelOriginNamespace), fmt.Sprintf("failed to queue service for LoadBalancer: %s, could not determine origin namespace", lb.GetName())) return []reconcile.Request{} } originalName, ok := lb.GetLabels()[kubelb.LabelOriginName] if !ok || originalName == "" { - r.Log.Error(fmt.Errorf("required label \"%s\" not found", kubelb.LabelOriginName), fmt.Sprintf("failed to queue service for LoadBalacner: %s, could not determine origin name", lb.GetName())) + r.Log.Error(fmt.Errorf("required label \"%s\" not found", kubelb.LabelOriginName), fmt.Sprintf("failed to queue service for LoadBalancer: %s, could not determine origin name", lb.GetName())) return []reconcile.Request{} } @@ -237,7 +233,7 @@ func (r *KubeLBServiceReconciler) enqueueLoadBalancer() handler.TypedMapFunc[*ku }) } -func (r *KubeLBServiceReconciler) getEndpoints(service *corev1.Service) []string { +func (r *KubeLBServiceReconciler) getEndpoints(service *corev1.Service) ([]string, bool) { var clusterEndpoints []string // Use LB Endpoint if there is any non KubeLb load balancer implementation @@ -250,10 +246,10 @@ func (r *KubeLBServiceReconciler) getEndpoints(service *corev1.Service) []string } } } else { - clusterEndpoints = r.Endpoints.ClusterEndpoints + return nil, true } - return clusterEndpoints + return clusterEndpoints, false } func (r *KubeLBServiceReconciler) shouldReconcile(svc corev1.Service) bool { @@ -271,8 +267,8 @@ func (r *KubeLBServiceReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&corev1.Service{}). WatchesRawSource( - source.Kind(r.KubeLBManager.GetCache(), &kubelbk8ciov1alpha1.LoadBalancer{}, - handler.TypedEnqueueRequestsFromMapFunc[*kubelbk8ciov1alpha1.LoadBalancer](r.enqueueLoadBalancer())), + source.Kind(r.KubeLBManager.GetCache(), &kubelbv1alpha1.LoadBalancer{}, + handler.TypedEnqueueRequestsFromMapFunc[*kubelbv1alpha1.LoadBalancer](r.enqueueLoadBalancer())), ). 
Complete(r) } diff --git a/internal/controllers/ccm/shared.go b/internal/controllers/ccm/shared.go index 9559563..c345b39 100644 --- a/internal/controllers/ccm/shared.go +++ b/internal/controllers/ccm/shared.go @@ -22,7 +22,7 @@ import ( "github.com/go-logr/logr" - kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" "k8c.io/kubelb/internal/resources/route" serviceHelpers "k8c.io/kubelb/internal/resources/service" "k8c.io/kubelb/internal/resources/unstructured" @@ -40,7 +40,7 @@ const ( func reconcileSourceForRoute(ctx context.Context, log logr.Logger, client ctrlclient.Client, lbClient ctrlclient.Client, resource ctrlclient.Object, originalServices []types.NamespacedName, referenceGrants []gwapiv1a2.ReferenceGrant, namespace string) error { log.V(2).Info("reconciling source for producing route") - unstructuredResource, err := unstructured.ConverObjectToUnstructured(resource) + unstructuredResource, err := unstructured.ConvertObjectToUnstructured(resource) if err != nil { return fmt.Errorf("failed to convert Ingress to unstructured: %w", err) } @@ -65,7 +65,7 @@ func reconcileSourceForRoute(ctx context.Context, log logr.Logger, client ctrlcl func cleanupRoute(ctx context.Context, client ctrlclient.Client, resourceUID string, namespace string) error { // Find the Route in LB cluster and delete it - route := kubelbk8ciov1alpha1.Route{} + route := kubelbv1alpha1.Route{} err := client.Get(ctx, types.NamespacedName{Name: resourceUID, Namespace: namespace}, &route) if err != nil { if !errors.IsNotFound(err) { diff --git a/internal/controllers/kubelb/envoy_cp_controller.go b/internal/controllers/kubelb/envoy_cp_controller.go index 9613c41..9ce4fb9 100644 --- a/internal/controllers/kubelb/envoy_cp_controller.go +++ b/internal/controllers/kubelb/envoy_cp_controller.go @@ -19,18 +19,33 @@ package kubelb import ( "context" "fmt" + "reflect" envoycachev3 "github.com/envoyproxy/go-control-plane/pkg/cache/v3" envoyresource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" - kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + "k8c.io/kubelb/internal/config" utils "k8c.io/kubelb/internal/controllers" envoycp "k8c.io/kubelb/internal/envoy" + "k8c.io/kubelb/internal/kubelb" portlookup "k8c.io/kubelb/internal/port-lookup" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + RequeueAllResources = "requeue-all-for-route" ) type EnvoyCPReconciler struct { @@ -38,6 +53,8 @@ type EnvoyCPReconciler struct { EnvoyCache envoycachev3.SnapshotCache EnvoyProxyTopology EnvoyProxyTopology PortAllocator *portlookup.PortAllocator + Namespace string + EnvoyBootstrap string } // +kubebuilder:rbac:groups=kubelb.k8c.io,resources=loadbalancers,verbs=get;list;watch @@ -51,68 +68,50 @@ func (r *EnvoyCPReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct } func (r *EnvoyCPReconciler) reconcile(ctx context.Context, req ctrl.Request) error { - var lb kubelbk8ciov1alpha1.LoadBalancer - err := r.Get(ctx, req.NamespacedName, &lb) - if err != nil 
&& !apierrors.IsNotFound(err) { - return err + snapshotName, appName := envoySnapshotAndAppName(r.EnvoyProxyTopology, req) + + lbs, routes, err := r.ListLoadBalancersAndRoutes(ctx, req) + if err != nil { + return fmt.Errorf("failed to list LoadBalancers and Routes: %w", err) } - snapshotName := envoySnapshotName(r.EnvoyProxyTopology, req) - switch r.EnvoyProxyTopology { - case EnvoyProxyTopologyDedicated: - if apierrors.IsNotFound(err) || !lb.DeletionTimestamp.IsZero() { - r.EnvoyCache.ClearSnapshot(snapshotName) - return nil - } - return r.updateCache(ctx, snapshotName, []kubelbk8ciov1alpha1.LoadBalancer{lb}) - case EnvoyProxyTopologyShared: - lbs, err := r.listLBs(ctx, client.InNamespace(lb.Namespace)) - if err != nil { - return err - } - if len(lbs) == 0 { - r.EnvoyCache.ClearSnapshot(snapshotName) - return nil - } - return r.updateCache(ctx, snapshotName, lbs) - case EnvoyProxyTopologyGlobal: - lbs, err := r.listLBs(ctx) - if err != nil { - return err - } - if len(lbs) == 0 { - r.EnvoyCache.ClearSnapshot(snapshotName) - return nil - } + if len(lbs) == 0 && len(routes) == 0 { + r.EnvoyCache.ClearSnapshot(snapshotName) + return r.cleanupEnvoyProxy(ctx, appName, req.Namespace) + } - // For Global topology, we need to ensure that an arbitrary port has been assigned to the endpoint ports of the LoadBalancer. - lbList := kubelbk8ciov1alpha1.LoadBalancerList{ + if err := r.ensureEnvoyProxy(ctx, req.Namespace, appName, snapshotName); err != nil { + return fmt.Errorf("failed to update Envoy proxy: %w", err) + } + + // For Global topology, we need to ensure that an arbitrary port has been assigned to the endpoint ports of the LoadBalancer. + if r.EnvoyProxyTopology == EnvoyProxyTopologyGlobal { + lbList := kubelbv1alpha1.LoadBalancerList{ Items: lbs, } if err := r.PortAllocator.AllocatePortsForLoadBalancers(lbList); err != nil { return err } + } - return r.updateCache(ctx, snapshotName, lbs) + if err := r.PortAllocator.AllocatePortsForRoutes(routes); err != nil { + return err } - return fmt.Errorf("unknown envoy proxy topology: %v", r.EnvoyProxyTopology) + + return r.updateCache(ctx, snapshotName, lbs, routes) } -func (r *EnvoyCPReconciler) updateCache(ctx context.Context, snapshotName string, lbs []kubelbk8ciov1alpha1.LoadBalancer) error { +func (r *EnvoyCPReconciler) updateCache(ctx context.Context, snapshotName string, lbs []kubelbv1alpha1.LoadBalancer, routes []kubelbv1alpha1.Route) error { log := ctrl.LoggerFrom(ctx) - currentSnapshot, err := r.EnvoyCache.GetSnapshot(snapshotName) + desiredSnapshot, err := envoycp.MapSnapshot(ctx, r.Client, lbs, routes, r.PortAllocator, r.EnvoyProxyTopology == EnvoyProxyTopologyGlobal) if err != nil { - initSnapshot, err := envoycp.MapSnapshot(lbs, r.PortAllocator, r.EnvoyProxyTopology == EnvoyProxyTopologyGlobal) - if err != nil { - return fmt.Errorf("failed to init snapshot %q: %w", snapshotName, err) - } - log.Info("init snapshot", "service-node", snapshotName, "version", initSnapshot.GetVersion(envoyresource.ClusterType)) - return r.EnvoyCache.SetSnapshot(ctx, snapshotName, initSnapshot) + return err } - desiredSnapshot, err := envoycp.MapSnapshot(lbs, r.PortAllocator, r.EnvoyProxyTopology == EnvoyProxyTopologyGlobal) + currentSnapshot, err := r.EnvoyCache.GetSnapshot(snapshotName) if err != nil { - return err + log.Info("init snapshot", "service-node", snapshotName, "version", desiredSnapshot.GetVersion(envoyresource.ClusterType)) + return r.EnvoyCache.SetSnapshot(ctx, snapshotName, desiredSnapshot) } lastUsedVersion := 
currentSnapshot.GetVersion(envoyresource.ClusterType) @@ -135,35 +134,253 @@ func (r *EnvoyCPReconciler) updateCache(ctx context.Context, snapshotName string return nil } -func (r *EnvoyCPReconciler) listLBs(ctx context.Context, options ...client.ListOption) (lbs []kubelbk8ciov1alpha1.LoadBalancer, err error) { - var list kubelbk8ciov1alpha1.LoadBalancerList - err = r.List(ctx, &list, options...) - if err != nil { - return +func (r *EnvoyCPReconciler) ListLoadBalancersAndRoutes(ctx context.Context, req ctrl.Request) ([]kubelbv1alpha1.LoadBalancer, []kubelbv1alpha1.Route, error) { + loadBalancers := kubelbv1alpha1.LoadBalancerList{} + routes := kubelbv1alpha1.RouteList{} + var err error + + switch r.EnvoyProxyTopology { + case EnvoyProxyTopologyDedicated: + if req.Name != RequeueAllResources { + lb := kubelbv1alpha1.LoadBalancer{} + err = r.Get(ctx, req.NamespacedName, &lb) + if err != nil { + return nil, nil, err + } + loadBalancers.Items = append(loadBalancers.Items, lb) + } + case EnvoyProxyTopologyShared: + err = r.List(ctx, &loadBalancers, client.InNamespace(req.Namespace)) + if err != nil { + return nil, nil, err + } + + err = r.List(ctx, &routes, client.InNamespace(req.Namespace)) + if err != nil { + return nil, nil, err + } + case EnvoyProxyTopologyGlobal: + err = r.List(ctx, &loadBalancers) + if err != nil { + return nil, nil, err + } + + err = r.List(ctx, &routes) + if err != nil { + return nil, nil, err + } } - for _, lb := range list.Items { + + lbs := make([]kubelbv1alpha1.LoadBalancer, 0, len(loadBalancers.Items)) + for _, lb := range loadBalancers.Items { if lb.DeletionTimestamp.IsZero() { lbs = append(lbs, lb) } } - return + + routeList := make([]kubelbv1alpha1.Route, 0, len(routes.Items)) + for _, route := range routes.Items { + if route.DeletionTimestamp.IsZero() { + routeList = append(routeList, route) + } + } + + return lbs, routeList, nil } -func envoySnapshotName(topology EnvoyProxyTopology, req ctrl.Request) string { +func (r *EnvoyCPReconciler) cleanupEnvoyProxy(ctx context.Context, appName string, namespace string) error { + log := ctrl.LoggerFrom(ctx).WithValues("reconcile", "envoy-proxy") + log.V(2).Info("cleanup envoy-proxy") + + if r.EnvoyProxyTopology == EnvoyProxyTopologyGlobal { + namespace = r.Namespace + } + + objMeta := v1.ObjectMeta{ + Name: fmt.Sprintf(envoyResourcePattern, appName), + Namespace: namespace, + } + var envoyProxy ctrlruntimeclient.Object + if config.GetConfig().Spec.EnvoyProxy.UseDaemonset { + envoyProxy = &appsv1.DaemonSet{ + ObjectMeta: objMeta, + } + } else { + envoyProxy = &appsv1.Deployment{ + ObjectMeta: objMeta, + } + } + + log.V(2).Info("Deleting envoy proxy", "name", envoyProxy.GetName(), "namespace", envoyProxy.GetNamespace()) + if err := r.Delete(ctx, envoyProxy); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete envoy proxy %s: %w", envoyProxy.GetName(), err) + } + return nil +} + +func (r *EnvoyCPReconciler) ensureEnvoyProxy(ctx context.Context, namespace, appName, snapshotName string) error { + log := ctrl.LoggerFrom(ctx).WithValues("reconcile", "envoy-proxy") + log.V(2).Info("verify envoy-proxy") + + var envoyProxy ctrlruntimeclient.Object + objMeta := metav1.ObjectMeta{ + Name: fmt.Sprintf(envoyResourcePattern, appName), + Namespace: namespace, + } + if config.GetConfig().Spec.EnvoyProxy.UseDaemonset { + envoyProxy = &appsv1.DaemonSet{ + ObjectMeta: objMeta, + } + } else { + envoyProxy = &appsv1.Deployment{ + ObjectMeta: objMeta, + } + } + + err := r.Get(ctx, types.NamespacedName{ + Name: 
fmt.Sprintf(envoyResourcePattern, appName), + Namespace: namespace, + }, envoyProxy) + + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + original := envoyProxy.DeepCopyObject() + if config.GetConfig().Spec.EnvoyProxy.UseDaemonset { + daemonset := envoyProxy.(*appsv1.DaemonSet) + daemonset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: map[string]string{kubelb.LabelAppKubernetesName: appName}, + } + daemonset.Spec.Template = r.getEnvoyProxyPodSpec(namespace, appName, snapshotName) + envoyProxy = daemonset + } else { + deployment := envoyProxy.(*appsv1.Deployment) + var replicas = config.GetConfig().Spec.EnvoyProxy.Replicas + deployment.Spec.Replicas = &replicas + deployment.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: map[string]string{kubelb.LabelAppKubernetesName: appName}, + } + deployment.Spec.Template = r.getEnvoyProxyPodSpec(namespace, appName, snapshotName) + envoyProxy = deployment + } + + if apierrors.IsNotFound(err) { + if err := r.Create(ctx, envoyProxy); err != nil { + return err + } + } else { + if !reflect.DeepEqual(original, envoyProxy) { + envoyProxy.SetManagedFields([]metav1.ManagedFieldsEntry{}) + envoyProxy.SetResourceVersion("") + if err := r.Patch(ctx, envoyProxy, ctrlruntimeclient.Apply, ctrlruntimeclient.ForceOwnership, ctrlruntimeclient.FieldOwner("kubelb")); err != nil { + return err + } + } + } + log.V(5).Info("desired", "envoy-proxy", envoyProxy) + + return nil +} + +func (r *EnvoyCPReconciler) getEnvoyProxyPodSpec(namespace, appName, snapshotName string) corev1.PodTemplateSpec { + envoyProxy := config.GetConfig().Spec.EnvoyProxy + template := corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Name: appName, + Namespace: namespace, + Labels: map[string]string{kubelb.LabelAppKubernetesName: appName}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: envoyProxyContainerName, + Image: envoyImage, + Args: []string{ + "--config-yaml", r.EnvoyBootstrap, + "--service-node", snapshotName, + "--service-cluster", namespace, + }, + }, + }, + }, + } + + if envoyProxy.Resources != nil { + template.Spec.Containers[0].Resources = *envoyProxy.Resources + } + + if envoyProxy.Affinity != nil { + template.Spec.Affinity = envoyProxy.Affinity + } + + if envoyProxy.Tolerations != nil && len(envoyProxy.Tolerations) > 0 { + template.Spec.Tolerations = envoyProxy.Tolerations + } + + if envoyProxy.NodeSelector != nil { + template.Spec.NodeSelector = envoyProxy.NodeSelector + } + + if envoyProxy.SinglePodPerNode { + template.Spec.TopologySpreadConstraints = []corev1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "kubernetes.io/hostname", + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{kubelb.LabelAppKubernetesName: appName}, + }, + }, + } + } + return template +} + +func envoySnapshotAndAppName(topology EnvoyProxyTopology, req ctrl.Request) (string, string) { switch topology { case EnvoyProxyTopologyShared: - return req.Namespace + return req.Namespace, req.Namespace case EnvoyProxyTopologyDedicated: - return fmt.Sprintf("%s-%s", req.Namespace, req.Name) + return fmt.Sprintf("%s-%s", req.Namespace, req.Name), req.Name case EnvoyProxyTopologyGlobal: - return envoyGlobalCache + return EnvoyGlobalCache, EnvoyGlobalCache + } + return "", "" +} + +// enqueueLoadBalancers is a handler.MapFunc to be used to enqueue requests for reconciliation +// for LoadBalancers.
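For reference, the snapshot/app name pairs returned by envoySnapshotAndAppName work out as follows (a minimal illustration; the request values are invented):

```go
req := ctrl.Request{NamespacedName: types.NamespacedName{Namespace: "tenant-a", Name: "my-lb"}}

envoySnapshotAndAppName(EnvoyProxyTopologyShared, req)    // ("tenant-a", "tenant-a")
envoySnapshotAndAppName(EnvoyProxyTopologyDedicated, req) // ("tenant-a-my-lb", "my-lb")
envoySnapshotAndAppName(EnvoyProxyTopologyGlobal, req)    // ("global", "global")
```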
+func (r *EnvoyCPReconciler) enqueueLoadBalancers() handler.MapFunc { + return func(_ context.Context, o ctrlruntimeclient.Object) []ctrl.Request { + result := []reconcile.Request{} + + result = append(result, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: RequeueAllResources, + Namespace: o.GetNamespace(), + }, + }) + + return result } - return "" } func (r *EnvoyCPReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + // 1. Watch for changes in LoadBalancer resources. + // 2. Resource must exist in a tenant namespace. + // 3. Watch for changes in Route resources and enqueue LoadBalancer resources. TODO: we need to + // find an alternative for this since it is more of a "hack". return ctrl.NewControllerManagedBy(mgr). - For(&kubelbk8ciov1alpha1.LoadBalancer{}). + For(&kubelbv1alpha1.LoadBalancer{}). WithEventFilter(utils.ByLabelExistsOnNamespace(ctx, mgr.GetClient())). + Watches( + &kubelbv1alpha1.Route{}, + handler.EnqueueRequestsFromMapFunc(r.enqueueLoadBalancers()), + ). + Watches( + &kubelbv1alpha1.Addresses{}, + handler.EnqueueRequestsFromMapFunc(r.enqueueLoadBalancers()), + ). Complete(r) } diff --git a/internal/controllers/kubelb/loadbalancer_controller.go b/internal/controllers/kubelb/loadbalancer_controller.go index eceba25..dd66cab 100644 --- a/internal/controllers/kubelb/loadbalancer_controller.go +++ b/internal/controllers/kubelb/loadbalancer_controller.go @@ -22,14 +22,13 @@ import ( "reflect" "strings" - kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" "k8c.io/kubelb/internal/config" utils "k8c.io/kubelb/internal/controllers" "k8c.io/kubelb/internal/kubelb" kuberneteshelper "k8c.io/kubelb/internal/kubernetes" portlookup "k8c.io/kubelb/internal/port-lookup" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -54,7 +53,7 @@ const ( envoyResourcePattern = "envoy-%s" envoyGlobalTopologyServicePattern = "envoy-%s-%s" envoyProxyCleanupFinalizer = "kubelb.k8c.io/cleanup-envoy-proxy" - envoyGlobalCache = "global" + EnvoyGlobalCache = "global" ) type EnvoyProxyTopology string @@ -72,9 +71,7 @@ type LoadBalancerReconciler struct { Cache cache.Cache Namespace string - PortAllocator *portlookup.PortAllocator - - EnvoyBootstrap string + PortAllocator *portlookup.PortAllocator EnvoyProxyTopology EnvoyProxyTopology } @@ -82,6 +79,8 @@ type LoadBalancerReconciler struct { // +kubebuilder:rbac:groups=kubelb.k8c.io,resources=loadbalancers/status,verbs=get;update;patch // +kubebuilder:rbac:groups=kubelb.k8c.io,resources=configs,verbs=get;list;watch // +kubebuilder:rbac:groups=kubelb.k8c.io,resources=configs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=kubelb.k8c.io,resources=addresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=kubelb.k8c.io,resources=addresses/status,verbs=get // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch // +kubebuilder:rbac:groups="apps",resources=deployments,verbs=get;list;watch;create;update;patch;delete @@ -92,7 +91,7 @@ func (r *LoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl.Request log.V(2).Info("reconciling LoadBalancer") - var loadBalancer kubelbk8ciov1alpha1.LoadBalancer + var loadBalancer kubelbv1alpha1.LoadBalancer err := r.Get(ctx, req.NamespacedName, &loadBalancer) 
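The enqueueLoadBalancers map function in the Envoy control-plane controller above deliberately ignores which Route or Addresses object changed: every event collapses into one synthetic request per tenant namespace, which reconcile() treats as "rebuild this namespace's snapshot". A sketch with an invented namespace:

```go
// Any Route/Addresses change in "tenant-xyz" funnels into this one request.
// In the shared and global topologies, ListLoadBalancersAndRoutes responds by
// re-listing every LoadBalancer and Route in scope rather than fetching a
// single object.
req := reconcile.Request{NamespacedName: types.NamespacedName{
	Name:      RequeueAllResources, // "requeue-all-for-route"
	Namespace: "tenant-xyz",
}}
```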
if err != nil { if ctrlruntimeclient.IgnoreNotFound(err) != nil { @@ -113,8 +112,7 @@ func (r *LoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl.Request // In case of shared envoy proxy topology, we need to fetch all load balancers. Otherwise, we only need to fetch the current one. // To keep things generic, we always propagate a list of load balancers here. var ( - loadBalancers kubelbk8ciov1alpha1.LoadBalancerList - appName string + loadBalancers kubelbv1alpha1.LoadBalancerList resourceNamespace string ) @@ -125,7 +123,6 @@ func (r *LoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl.Request log.Error(err, "unable to fetch LoadBalancer list") return ctrl.Result{}, err } - appName = req.Namespace resourceNamespace = req.Namespace case EnvoyProxyTopologyGlobal: // List all loadbalancers. We don't care about the namespace here. @@ -135,18 +132,15 @@ func (r *LoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl.Request log.Error(err, "unable to fetch LoadBalancer list") return ctrl.Result{}, err } - appName = envoyGlobalCache resourceNamespace = r.Namespace case EnvoyProxyTopologyDedicated: - loadBalancers.Items = []kubelbk8ciov1alpha1.LoadBalancer{loadBalancer} - appName = loadBalancer.Name + loadBalancers.Items = []kubelbv1alpha1.LoadBalancer{loadBalancer} resourceNamespace = req.Namespace } - // Resource is marked for deletion. if loadBalancer.DeletionTimestamp != nil { if kuberneteshelper.HasFinalizer(&loadBalancer, envoyProxyCleanupFinalizer) { - return reconcile.Result{}, r.handleEnvoyProxyCleanup(ctx, loadBalancer, len(loadBalancers.Items), appName, resourceNamespace) + return reconcile.Result{}, r.handleEnvoyProxyCleanup(ctx, loadBalancer, resourceNamespace) } // Finalizer doesn't exist so clean up is already done return reconcile.Result{}, nil @@ -167,13 +161,7 @@ func (r *LoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl.Request } } - snapshotName := envoySnapshotName(r.EnvoyProxyTopology, req) - err = r.reconcileEnvoyProxy(ctx, resourceNamespace, appName, snapshotName) - if err != nil { - log.Error(err, "Unable to reconcile envoy proxy") - return ctrl.Result{}, err - } - + _, appName := envoySnapshotAndAppName(r.EnvoyProxyTopology, req) err = r.reconcileService(ctx, &loadBalancer, appName, resourceNamespace, r.PortAllocator) if err != nil { log.Error(err, "Unable to reconcile service") @@ -183,72 +171,7 @@ func (r *LoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, nil } -func (r *LoadBalancerReconciler) reconcileEnvoyProxy(ctx context.Context, namespace, appName, snapshotName string) error { - log := ctrl.LoggerFrom(ctx).WithValues("reconcile", "envoy-proxy") - log.V(2).Info("verify envoy-proxy") - - var envoyProxy ctrlruntimeclient.Object - objMeta := v1.ObjectMeta{ - Name: fmt.Sprintf(envoyResourcePattern, appName), - Namespace: namespace, - } - if config.GetConfig().Spec.EnvoyProxy.UseDaemonset { - envoyProxy = &appsv1.DaemonSet{ - ObjectMeta: objMeta, - } - } else { - envoyProxy = &appsv1.Deployment{ - ObjectMeta: objMeta, - } - } - - err := r.Get(ctx, types.NamespacedName{ - Name: fmt.Sprintf(envoyResourcePattern, appName), - Namespace: namespace, - }, envoyProxy) - - if err != nil && !apierrors.IsNotFound(err) { - return err - } - - original := envoyProxy.DeepCopyObject() - if config.GetConfig().Spec.EnvoyProxy.UseDaemonset { - daemonset := envoyProxy.(*appsv1.DaemonSet) - daemonset.Spec.Selector = &v1.LabelSelector{ - MatchLabels: map[string]string{kubelb.LabelAppKubernetesName: 
appName}, - } - daemonset.Spec.Template = r.getEnvoyProxyPodSpec(namespace, appName, snapshotName) - envoyProxy = daemonset - } else { - deployment := envoyProxy.(*appsv1.Deployment) - var replicas = config.GetConfig().Spec.EnvoyProxy.Replicas - deployment.Spec.Replicas = &replicas - deployment.Spec.Selector = &v1.LabelSelector{ - MatchLabels: map[string]string{kubelb.LabelAppKubernetesName: appName}, - } - deployment.Spec.Template = r.getEnvoyProxyPodSpec(namespace, appName, snapshotName) - envoyProxy = deployment - } - - if apierrors.IsNotFound(err) { - if err := r.Create(ctx, envoyProxy); err != nil { - return err - } - } else { - if !reflect.DeepEqual(original, envoyProxy) { - envoyProxy.SetManagedFields([]v1.ManagedFieldsEntry{}) - envoyProxy.SetResourceVersion("") - if err := r.Patch(ctx, envoyProxy, ctrlruntimeclient.Apply, ctrlruntimeclient.ForceOwnership, ctrlruntimeclient.FieldOwner("kubelb")); err != nil { - return err - } - } - } - log.V(5).Info("desired", "envoy-proxy", envoyProxy) - - return nil -} - -func (r *LoadBalancerReconciler) reconcileService(ctx context.Context, loadBalancer *kubelbk8ciov1alpha1.LoadBalancer, appName, namespace string, portAllocator *portlookup.PortAllocator) error { +func (r *LoadBalancerReconciler) reconcileService(ctx context.Context, loadBalancer *kubelbv1alpha1.LoadBalancer, appName, namespace string, portAllocator *portlookup.PortAllocator) error { log := ctrl.LoggerFrom(ctx).WithValues("reconcile", "service") log.V(2).Info("verify service") @@ -348,10 +271,10 @@ func (r *LoadBalancerReconciler) reconcileService(ctx context.Context, loadBalan // Status changes log.V(5).Info("load balancer status", "LoadBalancer", loadBalancer.Status.LoadBalancer.Ingress, "service", service.Status.LoadBalancer.Ingress) - updatedPorts := []kubelbk8ciov1alpha1.ServicePort{} + updatedPorts := []kubelbv1alpha1.ServicePort{} for i, port := range service.Spec.Ports { targetPort := loadBalancer.Spec.Endpoints[0].Ports[i].Port - updatedPorts = append(updatedPorts, kubelbk8ciov1alpha1.ServicePort{ + updatedPorts = append(updatedPorts, kubelbv1alpha1.ServicePort{ ServicePort: port, // In case of global topology, this will be different from the targetPort. Otherwise it will be the same. 
UpstreamTargetPort: targetPort, @@ -360,8 +283,8 @@ func (r *LoadBalancerReconciler) reconcileService(ctx context.Context, loadBalan // Update status if needed updateStatus := false - updatedLoadBalanacerStatus := kubelbk8ciov1alpha1.LoadBalancerStatus{ - Service: kubelbk8ciov1alpha1.ServiceStatus{ + updatedLoadBalanacerStatus := kubelbv1alpha1.LoadBalancerStatus{ + Service: kubelbv1alpha1.ServiceStatus{ Ports: updatedPorts, }, LoadBalancer: service.Status.LoadBalancer, @@ -385,7 +308,7 @@ func (r *LoadBalancerReconciler) reconcileService(ctx context.Context, loadBalan log.V(3).Info("updating LoadBalancer status", "name", loadBalancer.Name, "namespace", loadBalancer.Namespace) return retry.RetryOnConflict(retry.DefaultRetry, func() error { - lb := &kubelbk8ciov1alpha1.LoadBalancer{} + lb := &kubelbv1alpha1.LoadBalancer{} if err := r.Get(ctx, types.NamespacedName{Name: loadBalancer.Name, Namespace: loadBalancer.Namespace}, lb); err != nil { return err } @@ -399,34 +322,10 @@ func (r *LoadBalancerReconciler) reconcileService(ctx context.Context, loadBalan }) } -func (r *LoadBalancerReconciler) handleEnvoyProxyCleanup(ctx context.Context, lb kubelbk8ciov1alpha1.LoadBalancer, lbCount int, appName, resourceNamespace string) error { +func (r *LoadBalancerReconciler) handleEnvoyProxyCleanup(ctx context.Context, lb kubelbv1alpha1.LoadBalancer, resourceNamespace string) error { log := ctrl.LoggerFrom(ctx).WithValues("cleanup", "LoadBalancer") - log.V(2).Info("Cleaning up LoadBalancer", "name", lb.Name, "namespace", lb.Namespace) - // We can delete the envoy proxy deployment if there are no other load balancers. - if lbCount == 1 { - objMeta := v1.ObjectMeta{ - Name: fmt.Sprintf(envoyResourcePattern, appName), - Namespace: resourceNamespace, - } - var envoyProxy ctrlruntimeclient.Object - if config.GetConfig().Spec.EnvoyProxy.UseDaemonset { - envoyProxy = &appsv1.DaemonSet{ - ObjectMeta: objMeta, - } - } else { - envoyProxy = &appsv1.Deployment{ - ObjectMeta: objMeta, - } - } - - log.V(2).Info("Deleting envoy proxy", "name", envoyProxy.GetName(), "namespace", envoyProxy.GetNamespace()) - if err := r.Delete(ctx, envoyProxy); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to delete envoy proxy %s: %v against LoadBalancer %w", envoyProxy.GetName(), fmt.Sprintf("%s/%s", lb.Name, lb.Namespace), err) - } - } - // Deallocate ports if we are using global envoy proxy topology. if r.EnvoyProxyTopology == EnvoyProxyTopologyGlobal { if err := r.PortAllocator.DeallocatePortsForLoadBalancer(lb); err != nil { @@ -465,10 +364,10 @@ func (r *LoadBalancerReconciler) handleEnvoyProxyCleanup(ctx context.Context, lb func (r *LoadBalancerReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&kubelbk8ciov1alpha1.LoadBalancer{}). + For(&kubelbv1alpha1.LoadBalancer{}). WithEventFilter(utils.ByLabelExistsOnNamespace(ctx, mgr.GetClient())). Watches( - &kubelbk8ciov1alpha1.Config{}, + &kubelbv1alpha1.Config{}, handler.EnqueueRequestsFromMapFunc(r.enqueueLoadBalancersForConfig()), builder.WithPredicates(filterServicesPredicate()), ). @@ -479,60 +378,6 @@ func (r *LoadBalancerReconciler) SetupWithManager(ctx context.Context, mgr ctrl. 
Complete(r) } -func (r *LoadBalancerReconciler) getEnvoyProxyPodSpec(namespace, appName, snapshotName string) corev1.PodTemplateSpec { - envoyProxy := config.GetConfig().Spec.EnvoyProxy - template := corev1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Name: appName, - Namespace: namespace, - Labels: map[string]string{kubelb.LabelAppKubernetesName: appName}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: envoyProxyContainerName, - Image: envoyImage, - Args: []string{ - "--config-yaml", r.EnvoyBootstrap, - "--service-node", snapshotName, - "--service-cluster", namespace, - }, - }, - }, - }, - } - - if envoyProxy.Resources != nil { - template.Spec.Containers[0].Resources = *envoyProxy.Resources - } - - if envoyProxy.Affinity != nil { - template.Spec.Affinity = envoyProxy.Affinity - } - - if envoyProxy.Tolerations != nil && len(envoyProxy.Tolerations) > 0 { - template.Spec.Tolerations = envoyProxy.Tolerations - } - - if envoyProxy.NodeSelector != nil { - template.Spec.NodeSelector = envoyProxy.NodeSelector - } - - if envoyProxy.SinglePodPerNode { - template.Spec.TopologySpreadConstraints = []corev1.TopologySpreadConstraint{ - { - MaxSkew: 1, - TopologyKey: "kubernetes.io/hostname", - WhenUnsatisfiable: corev1.ScheduleAnyway, - LabelSelector: &v1.LabelSelector{ - MatchLabels: map[string]string{kubelb.LabelAppKubernetesName: appName}, - }, - }, - } - } - return template -} - // enqueueLoadBalancers is a handler.MapFunc to be used to enqeue requests for reconciliation // for LoadBalancers against the corresponding service. func (r *LoadBalancerReconciler) enqueueLoadBalancers() handler.MapFunc { @@ -600,7 +445,7 @@ func propagateAnnotations(permitted map[string]string, loadbalancer map[string]s a := make(map[string]string) permittedMap := make(map[string][]string) for k, v := range permitted { - if strings.HasPrefix(k, kubelbk8ciov1alpha1.PropagateAnnotation) { + if strings.HasPrefix(k, kubelbv1alpha1.PropagateAnnotation) { filter := strings.SplitN(k, "=", 2) if len(filter) <= 1 { permittedMap[v] = []string{} @@ -649,7 +494,7 @@ func (r *LoadBalancerReconciler) enqueueLoadBalancersForConfig() handler.MapFunc result := []reconcile.Request{} // Reload the Config for the controller. - conf := &kubelbk8ciov1alpha1.Config{} + conf := &kubelbv1alpha1.Config{} err := r.Get(ctx, types.NamespacedName{Name: config.DefaultConfigResourceName, Namespace: r.Namespace}, conf) if err != nil { return result @@ -657,7 +502,7 @@ func (r *LoadBalancerReconciler) enqueueLoadBalancersForConfig() handler.MapFunc config.SetConfig(*conf) // List all loadbalancers. We don't care about the namespace here. - loadBalancers := &kubelbk8ciov1alpha1.LoadBalancerList{} + loadBalancers := &kubelbv1alpha1.LoadBalancerList{} err = r.List(ctx, loadBalancers) if err != nil { return result diff --git a/internal/controllers/kubelb/loadbalancer_controller_test.go b/internal/controllers/kubelb/loadbalancer_controller_test.go index 4a7284b..4714bac 100644 --- a/internal/controllers/kubelb/loadbalancer_controller_test.go +++ b/internal/controllers/kubelb/loadbalancer_controller_test.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" envoycp "k8c.io/kubelb/internal/envoy" "k8c.io/kubelb/internal/kubelb" @@ -131,7 +131,7 @@ var _ = Describe("Lb deployment and service creation", func() { snapshot, err := envoyServer.Cache.GetSnapshot(snapshotName) Expect(err).ToNot(HaveOccurred()) - testSnapshot, err := envoycp.MapSnapshot(getLoadBalancerList(*lb), ecpr.PortAllocator, t.topology == EnvoyProxyTopologyGlobal) + testSnapshot, err := envoycp.MapSnapshot(ctx, k8sClient, getLoadBalancerList(*lb), nil, ecpr.PortAllocator, t.topology == EnvoyProxyTopologyGlobal) Expect(err).ToNot(HaveOccurred()) diff := deep.Equal(snapshot, testSnapshot) if len(diff) > 0 { @@ -143,7 +143,7 @@ var _ = Describe("Lb deployment and service creation", func() { Context(fmt.Sprintf("When updating an existing LoadBalancers Ports with %v topology", t.topology), func() { It("Should update the load balancer service and envoy snapshot", func() { - existingLb := &kubelbk8ciov1alpha1.LoadBalancer{} + existingLb := &kubelbv1alpha1.LoadBalancer{} Eventually(func() error { return k8sClient.Get(ctx, lbLookupKey, existingLb) @@ -155,12 +155,12 @@ var _ = Describe("Lb deployment and service creation", func() { existingLb.Spec.Ports[0].Name = "port-a" existingLb.Spec.Endpoints[0].Ports[0].Name = "port-a" - existingLb.Spec.Ports = append(existingLb.Spec.Ports, kubelbk8ciov1alpha1.LoadBalancerPort{ + existingLb.Spec.Ports = append(existingLb.Spec.Ports, kubelbv1alpha1.LoadBalancerPort{ Name: "port-b", Port: 81, }) - existingLb.Spec.Endpoints[0].Ports = append(existingLb.Spec.Endpoints[0].Ports, kubelbk8ciov1alpha1.EndpointPort{ + existingLb.Spec.Endpoints[0].Ports = append(existingLb.Spec.Endpoints[0].Ports, kubelbv1alpha1.EndpointPort{ Name: "port-b", Port: 8081, }) @@ -195,67 +195,17 @@ var _ = Describe("Lb deployment and service creation", func() { snapshot, err := envoyServer.Cache.GetSnapshot(snapshotName) Expect(err).ToNot(HaveOccurred()) - testSnapshot, err := envoycp.MapSnapshot(getLoadBalancerList(*existingLb), ecpr.PortAllocator, t.topology == EnvoyProxyTopologyGlobal) + testSnapshot, err := envoycp.MapSnapshot(ctx, k8sClient, getLoadBalancerList(*existingLb), nil, ecpr.PortAllocator, t.topology == EnvoyProxyTopologyGlobal) Expect(err).ToNot(HaveOccurred()) diff := deep.Equal(snapshot, testSnapshot) if len(diff) > 0 { fmt.Printf("expected snapshot didn't match generated snapshot, diff: %+v", diff) } + Expect(len(diff)).To(Equal(0)) listener := snapshot.GetResources(resource.ListenerType) Expect(len(listener)).To(BeEquivalentTo(2)) - /* - aListenerAny, err := ptypes.MarshalAny(listener["port-a"]) - Expect(err).ToNot(HaveOccurred()) - aListener := &listenerv3.Listener{} - err = ptypes.UnmarshalAny(aListenerAny, aListener) - Expect(err).ToNot(HaveOccurred()) - - Expect(aListener.Name).To(Equal("port-a")) - - socketAddress := aListener.Address.Address.(*envoyCore.Address_SocketAddress) - socketPortValue := socketAddress.SocketAddress.PortSpecifier.(*envoyCore.SocketAddress_PortValue) - Expect(socketPortValue.PortValue).To(Equal(uint32(80))) - - bListenerAny, err := ptypes.MarshalAny(listener["port-b"]) - Expect(err).ToNot(HaveOccurred()) - bListener := &listenerv3.Listener{} - err = ptypes.UnmarshalAny(bListenerAny, bListener) - Expect(err).ToNot(HaveOccurred()) - - Expect(bListener.Name).To(Equal("port-b")) - - socketAddress = bListener.Address.Address.(*envoyCore.Address_SocketAddress) - socketPortValue = 
socketAddress.SocketAddress.PortSpecifier.(*envoyCore.SocketAddress_PortValue) - Expect(socketPortValue.PortValue).To(Equal(uint32(81))) - - By("updating the envoy cluster") - - cluster := snapshot.GetResources(resource.ClusterType) - - ClusterAny, err := ptypes.MarshalAny(cluster["default-port-a"]) - Expect(err).ToNot(HaveOccurred()) - envoyCluster := &clusterv3.Cluster{} - err = ptypes.UnmarshalAny(ClusterAny, envoyCluster) - Expect(err).ToNot(HaveOccurred()) - clusterLbEndpoint := envoyCluster.LoadAssignment.Endpoints[0].LbEndpoints[0].HostIdentifier.(*endpointv3.LbEndpoint_Endpoint) - socketAddress = clusterLbEndpoint.Endpoint.Address.Address.(*envoyCore.Address_SocketAddress) - socketPortValue = socketAddress.SocketAddress.PortSpecifier.(*envoyCore.SocketAddress_PortValue) - - Expect(socketPortValue.PortValue).To(Equal(uint32(8080))) - - ClusterAny, err = ptypes.MarshalAny(cluster["default-port-b"]) - Expect(err).ToNot(HaveOccurred()) - envoyCluster = &clusterv3.Cluster{} - err = ptypes.UnmarshalAny(ClusterAny, envoyCluster) - Expect(err).ToNot(HaveOccurred()) - clusterLbEndpoint = envoyCluster.LoadAssignment.Endpoints[0].LbEndpoints[0].HostIdentifier.(*endpointv3.LbEndpoint_Endpoint) - socketAddress = clusterLbEndpoint.Endpoint.Address.Address.(*envoyCore.Address_SocketAddress) - socketPortValue = socketAddress.SocketAddress.PortSpecifier.(*envoyCore.SocketAddress_PortValue) - - Expect(socketPortValue.PortValue).To(Equal(uint32(8081))) - */ }) }) @@ -280,6 +230,6 @@ var _ = Describe("Lb deployment and service creation", func() { } }) -func getLoadBalancerList(lb kubelbk8ciov1alpha1.LoadBalancer) []kubelbk8ciov1alpha1.LoadBalancer { - return []kubelbk8ciov1alpha1.LoadBalancer{lb} +func getLoadBalancerList(lb kubelbv1alpha1.LoadBalancer) []kubelbv1alpha1.LoadBalancer { + return []kubelbv1alpha1.LoadBalancer{lb} } diff --git a/internal/controllers/kubelb/route_controller.go b/internal/controllers/kubelb/route_controller.go new file mode 100644 index 0000000..274803d --- /dev/null +++ b/internal/controllers/kubelb/route_controller.go @@ -0,0 +1,468 @@ +/* +Copyright 2024 The KubeLB Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelb + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" + + "github.com/go-logr/logr" + + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + "k8c.io/kubelb/internal/config" + "k8c.io/kubelb/internal/kubelb" + kuberneteshelper "k8c.io/kubelb/internal/kubernetes" + portlookup "k8c.io/kubelb/internal/port-lookup" + serviceHelpers "k8c.io/kubelb/internal/resources/service" + "k8c.io/kubelb/internal/resources/unstructured" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/equality" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + RouteControllerName = "route-controller" + CleanupFinalizer = "kubelb.k8c.io/cleanup" +) + +// RouteReconciler reconciles a Route Object +type RouteReconciler struct { + ctrlclient.Client + Log logr.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder + + PortAllocator *portlookup.PortAllocator + EnvoyProxyTopology EnvoyProxyTopology +} + +// +kubebuilder:rbac:groups=kubelb.k8c.io,resources=routes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=kubelb.k8c.io,resources=routes/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses/status,verbs=get;update;patch + +func (r *RouteReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("name", req.NamespacedName) + + log.Info("Reconciling") + + resource := &kubelbv1alpha1.Route{} + if err := r.Get(ctx, req.NamespacedName, resource); err != nil { + if kerrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Resource is marked for deletion + if resource.DeletionTimestamp != nil { + if kuberneteshelper.HasFinalizer(resource, CleanupFinalizer) { + return r.cleanup(ctx, resource) + } + // Finalizer doesn't exist so clean up is already done + return reconcile.Result{}, nil + } + + // Add finalizer if it doesn't exist + if !kuberneteshelper.HasFinalizer(resource, CleanupFinalizer) { + kuberneteshelper.AddFinalizer(resource, CleanupFinalizer) + if err := r.Update(ctx, resource); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to add finalizer: %w", err) + } + } + + err := r.reconcile(ctx, log, resource) + if err != nil { + log.Error(err, "reconciling failed") + } + + return reconcile.Result{}, err +} + +func (r *RouteReconciler) reconcile(ctx context.Context, log logr.Logger, route *kubelbv1alpha1.Route) error { + // Create or update services based on the route. + err := r.manageServices(ctx, log, route) + if err != nil { + return fmt.Errorf("failed to create or update services: %w", err) + } + + // Create or update the route object. 
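For orientation before manageRoutes below: cleanup() above works entirely off the bookkeeping in route.Status.Resources.Services, where each entry records the origin coordinates of a Service and the name it was given on the LB side. An example entry with invented values:

```go
// One status entry: Service "backend" from tenant namespace "default" was
// mirrored into the Route's namespace under its generated name.
entry := kubelbv1alpha1.RouteServiceStatus{
	ResourceState: kubelbv1alpha1.ResourceState{
		Name:          "backend",         // name in the tenant cluster
		Namespace:     "default",         // namespace in the tenant cluster
		GeneratedName: "default-backend", // name of the mirrored Service
	},
}
// cleanup() then deletes the Service {Name: entry.GeneratedName, Namespace: route.Namespace}.
```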
+ err = r.manageRoutes(ctx, log, route) + if err != nil { + return fmt.Errorf("failed to create or update route: %w", err) + } + + return nil +} + +func (r *RouteReconciler) cleanup(ctx context.Context, route *kubelbv1alpha1.Route) (ctrl.Result, error) { + // Route will be removed automatically because of owner reference. We need to take care of removing + // the services while ensuring that the services are not being used by other routes. + + if route.Status.Resources.Services == nil { + return reconcile.Result{}, nil + } + + for _, value := range route.Status.Resources.Services { + log := r.Log.WithValues("name", value.Name, "namespace", value.Namespace) + log.V(1).Info("Deleting service", "name", value.GeneratedName, "namespace", route.Namespace) + svc := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: value.GeneratedName, + Namespace: route.Namespace, + }, + } + if err := r.Client.Delete(ctx, &svc); err != nil { + if !kerrors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("failed to delete service: %w", err) + } + } + } + + // De-allocate the ports allocated for the services. + if err := r.PortAllocator.DeallocatePortsForRoute(*route); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to deallocate ports: %w", err) + } + + kuberneteshelper.RemoveFinalizer(route, CleanupFinalizer) + if err := r.Update(ctx, route); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to remove finalizer: %w", err) + } + + return reconcile.Result{}, nil +} + +func (r *RouteReconciler) manageServices(ctx context.Context, log logr.Logger, route *kubelbv1alpha1.Route) error { + if route.Spec.Source.Kubernetes == nil { + return nil + } + + // Before creating/updating services, ensure that the orphaned services are cleaned up. + err := r.cleanupOrphanedServices(ctx, log, route) + if err != nil { + return fmt.Errorf("failed to cleanup orphaned services: %w", err) + } + + // Allocate ports for the services. These ports are then used as the target ports for the services. + if err := r.PortAllocator.AllocatePortsForRoutes([]kubelbv1alpha1.Route{*route}); err != nil { + return err + } + + appName := envoyApplicationName(r.EnvoyProxyTopology, route.Namespace) + services := []corev1.Service{} + for _, service := range route.Spec.Source.Kubernetes.Services { + // Transform the service into desired state. + svc := serviceHelpers.GenerateServiceForLBCluster(service.Service, appName, route.Namespace, r.PortAllocator) + services = append(services, svc) + } + + routeStatus := route.Status.DeepCopy() + for _, svc := range services { + log.V(4).Info("Creating/Updating service", "name", svc.Name, "namespace", svc.Namespace) + var err error + if err = serviceHelpers.CreateOrUpdateService(ctx, r.Client, &svc); err != nil { + // We only log the error and set the condition to false. The error will be set in the status. + log.Error(err, "failed to create or update Service", "name", svc.Name, "namespace", svc.Namespace) + errorMessage := fmt.Errorf("failed to create or update Service: %w", err) + r.Recorder.Eventf(route, corev1.EventTypeWarning, "ServiceApplyFailed", errorMessage.Error()) + } + updateServiceStatus(routeStatus, &svc, err) + } + return r.UpdateRouteStatus(ctx, route, *routeStatus) +} + +func (r *RouteReconciler) cleanupOrphanedServices(ctx context.Context, log logr.Logger, route *kubelbv1alpha1.Route) error { + // Get all the services based on route. 
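The orphan detection that follows keys everything by origin coordinates using RouteServiceMapKey ("%s/%s"); a quick illustration with invented values:

```go
// Desired-set key for Service "backend" in tenant namespace "default".
key := fmt.Sprintf(kubelb.RouteServiceMapKey, "default", "backend") // "default/backend"

// Any entry in route.Status.Resources.Services whose key is missing from
// desiredServices is an orphan: its generated Service is deleted and its
// Envoy endpoint ports are handed back to the allocator.
```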
+ desiredServices := map[string]bool{} + for _, service := range route.Spec.Source.Kubernetes.Services { + name := serviceHelpers.GetServiceName(service.Service) + key := fmt.Sprintf(kubelb.RouteServiceMapKey, service.Service.Namespace, name) + desiredServices[key] = true + } + + if route.Status.Resources.Services == nil { + return nil + } + + for key, value := range route.Status.Resources.Services { + if _, ok := desiredServices[key]; !ok { + // Service is not desired, so delete it. + log.V(4).Info("Deleting orphaned service", "name", value.GeneratedName, "namespace", route.Namespace) + svc := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: value.GeneratedName, + Namespace: route.Namespace, + }, + } + if err := r.Client.Delete(ctx, &svc); err != nil { + if !kerrors.IsNotFound(err) { + return fmt.Errorf("failed to delete orphaned service: %w", err) + } + } + delete(route.Status.Resources.Services, key) + + endpointKey := fmt.Sprintf(kubelb.EnvoyEndpointRoutePattern, route.Namespace, value.Namespace, svc.Name) + // De-allocate the ports allocated for the service. + r.PortAllocator.DeallocateEndpoints([]string{endpointKey}) + } + } + return nil +} + +func (r *RouteReconciler) UpdateRouteStatus(ctx context.Context, route *kubelbv1alpha1.Route, status kubelbv1alpha1.RouteStatus) error { + key := ctrlclient.ObjectKeyFromObject(route) + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Fetch the current state + if err := r.Client.Get(ctx, key, route); err != nil { + return err + } + + // Update the status + original := route.DeepCopy() + route.Status = status + + // If the status has not changed, no need to update. + if reflect.DeepEqual(original.Status, route.Status) { + return nil + } + + // Update the route + return r.Client.Status().Patch(ctx, route, ctrlruntimeclient.MergeFrom(original)) + }) +} + +func (r *RouteReconciler) manageRoutes(ctx context.Context, log logr.Logger, route *kubelbv1alpha1.Route) error { + if route.Spec.Source.Kubernetes == nil { + return nil + } + + resource, err := unstructured.ConvertUnstructuredToObject(&route.Spec.Source.Kubernetes.Route) + if err != nil { + return fmt.Errorf("failed to convert route to object: %w", err) + } + + ownerReference := metav1.OwnerReference{ + APIVersion: route.APIVersion, + Kind: route.Kind, + Name: route.Name, + UID: route.UID, + } + + // Set owner reference for the resource. + resource.SetOwnerReferences([]metav1.OwnerReference{ownerReference}) + + // Get the services referenced by the route. + var referencedServices []metav1.ObjectMeta + for _, service := range route.Spec.Source.Kubernetes.Services { + name := serviceHelpers.GetServiceName(service.Service) + objectMeta := metav1.ObjectMeta{ + Name: name, + Namespace: service.Service.Namespace, + UID: service.Service.UID, + } + referencedServices = append(referencedServices, objectMeta) + } + + routeStatus := route.Status.DeepCopy() + + // Determine the type of the resource and call the appropriate method + switch v := resource.(type) { + case *v1.Ingress: // Assuming v1 "k8s.io/api/networking/v1" + err = r.createOrUpdateIngress(ctx, log, v, referencedServices, route.Namespace) + if err == nil { + // Retrieve updated Ingress object to get the status. 
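One caveat about the owner reference assembled above: it copies APIVersion and Kind straight off the Route, and the typed client often leaves TypeMeta empty on fetched objects, which would produce an owner reference the API server rejects. A defensive variant (illustrative only; it assumes the kubebuilder-generated GroupVersion variable in the v1alpha1 package) derives the GVK explicitly:

```go
// Pin the GVK from the API package instead of trusting TypeMeta.
gvk := kubelbv1alpha1.GroupVersion.WithKind("Route")
ownerReference := metav1.OwnerReference{
	APIVersion: gvk.GroupVersion().String(), // "kubelb.k8c.io/v1alpha1"
	Kind:       gvk.Kind,                    // "Route"
	Name:       route.Name,
	UID:        route.UID,
}
```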
+ ingressKey := client.ObjectKey{Namespace: v.Namespace, Name: v.Name} + ingress := &v1.Ingress{} + if err := r.Client.Get(ctx, ingressKey, ingress); err != nil { + if !kerrors.IsNotFound(err) { + return fmt.Errorf("failed to get Ingress: %w", err) + } + } + updateResourceStatus(routeStatus, ingress, err) + } + + default: + log.V(4).Info("Unsupported resource type") + } + + return r.UpdateRouteStatus(ctx, route, *routeStatus) +} + +// createOrUpdateIngress creates or updates the Ingress object in the cluster. +func (r *RouteReconciler) createOrUpdateIngress(ctx context.Context, log logr.Logger, ingress *v1.Ingress, referencedServices []metav1.ObjectMeta, namespace string) error { + // Names of the services referenced by the Ingress have to be updated to match the services created against the Route in the LB cluster. + for i, rule := range ingress.Spec.Rules { + for j, path := range rule.HTTP.Paths { + for _, service := range referencedServices { + if path.Backend.Service.Name == service.Name { + ingress.Spec.Rules[i].HTTP.Paths[j].Backend.Service.Name = kubelb.GenerateName(false, string(service.UID), service.Name, service.Namespace) + } + } + } + } + + if ingress.Spec.DefaultBackend != nil && ingress.Spec.DefaultBackend.Service != nil { + for _, service := range referencedServices { + if ingress.Spec.DefaultBackend.Service.Name == service.Name { + ingress.Spec.DefaultBackend.Service.Name = kubelb.GenerateName(false, string(service.UID), service.Name, service.Namespace) + } + } + } + + ingress.Spec.IngressClassName = config.GetConfig().Spec.IngressClassName + ingress.Name = kubelb.GenerateName(false, string(ingress.UID), ingress.Name, ingress.Namespace) + ingress.Namespace = namespace + ingress.SetUID("") // Reset UID to generate a new UID for the Ingress object + + log.V(4).Info("Creating/Updating Ingress", "name", ingress.Name, "namespace", ingress.Namespace) + // Check if it already exists. + ingressKey := client.ObjectKey{Namespace: ingress.Namespace, Name: ingress.Name} + existingIngress := &v1.Ingress{} + if err := r.Client.Get(ctx, ingressKey, existingIngress); err != nil { + if !kerrors.IsNotFound(err) { + return fmt.Errorf("failed to get Ingress: %w", err) + } + err := r.Client.Create(ctx, ingress) + if err != nil { + return fmt.Errorf("failed to create Ingress: %w", err) + } + return nil + } + + // Update the Ingress object if it is different from the existing one. + if equality.Semantic.DeepEqual(existingIngress.Spec, ingress.Spec) && + equality.Semantic.DeepEqual(existingIngress.Labels, ingress.Labels) && + equality.Semantic.DeepEqual(existingIngress.Annotations, ingress.Annotations) { + return nil + } + + if err := r.Client.Update(ctx, ingress); err != nil { + return fmt.Errorf("failed to update Ingress: %w", err) + } + return nil +} + +func updateServiceStatus(routeStatus *kubelbv1alpha1.RouteStatus, svc *corev1.Service, err error) { + originalName := serviceHelpers.GetServiceName(*svc) + originalNamespace := serviceHelpers.GetServiceNamespace(*svc) + status := kubelbv1alpha1.RouteServiceStatus{ + ResourceState: kubelbv1alpha1.ResourceState{ + GeneratedName: svc.GetName(), + Namespace: originalNamespace, + Name: originalName, + }, + Ports: svc.Spec.Ports, + } + status.Conditions = generateConditions(err) + + svcStatus, err := json.Marshal(svc.Status) + if err != nil { + // If we are unable to marshal the status, we set it to an empty object. There is no need to fail the reconciliation.
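The net effect of createOrUpdateIngress: every backend pointer in the mirrored Ingress ends up on the Service that manageServices generated earlier, via the same naming helper. With an invented UID:

```go
// Backend "backend" from tenant namespace "default" is rewritten to the
// generated LB-cluster Service name; the UID tail only matters once the
// namespace-name pair overflows the 63-character limit.
uid := "0a1b2c3d-0000-4e5f-8888-aaaabbbb9f1c"
name := kubelb.GenerateName(false, uid, "backend", "default") // "default-backend"
```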
+ svcStatus = []byte(kubelb.DefaultRouteStatus) + } + + status.Status = runtime.RawExtension{ + Raw: svcStatus, + } + if routeStatus.Resources.Services == nil { + routeStatus.Resources.Services = make(map[string]kubelbv1alpha1.RouteServiceStatus) + } + key := fmt.Sprintf(kubelb.RouteServiceMapKey, originalNamespace, originalName) + routeStatus.Resources.Services[key] = status +} + +func updateResourceStatus(routeStatus *kubelbv1alpha1.RouteStatus, obj client.Object, err error) { + status := kubelbv1alpha1.ResourceState{ + GeneratedName: obj.GetName(), + Namespace: kubelb.GetNamespace(obj), + Name: kubelb.GetName(obj), + APIVersion: obj.GetObjectKind().GroupVersionKind().GroupVersion().String(), + Kind: obj.GetObjectKind().GroupVersionKind().Kind, + } + status.Conditions = generateConditions(err) + + var resourceStatus []byte + //nolint:gocritic + switch resource := obj.(type) { + case *v1.Ingress: + resourceStatus, err = json.Marshal(resource.Status) + if err != nil { + // If we are unable to marshal the status, we set it to an empty object. There is no need to fail the reconciliation. + resourceStatus = []byte(kubelb.DefaultRouteStatus) + } + status.Status = runtime.RawExtension{ + Raw: resourceStatus, + } + routeStatus.Resources.Route = status + } +} + +func generateConditions(err error) []metav1.Condition { + conditionMessage := "Success" + conditionStatus := metav1.ConditionTrue + conditionReason := "InstallationSuccessful" + if err != nil { + conditionMessage = err.Error() + conditionStatus = metav1.ConditionFalse + conditionReason = "InstallationFailed" + } + return []metav1.Condition{ + { + Type: kubelbv1alpha1.ConditionResourceAppliedSuccessfully.String(), + Reason: conditionReason, + Status: conditionStatus, + LastTransitionTime: metav1.Time{ + Time: time.Now(), + }, + Message: conditionMessage, + }, + } +} + +func envoyApplicationName(topology EnvoyProxyTopology, namespace string) string { + switch topology { + case EnvoyProxyTopologyShared: + return namespace + case EnvoyProxyTopologyGlobal: + return EnvoyGlobalCache + } + return "" +} + +func (r *RouteReconciler) SetupWithManager(mgr ctrl.Manager) error { + // 1. Watch for changes in Route object. + // 2. Skip reconciliation if generation is not changed; only status/metadata changed. + return ctrl.NewControllerManagedBy(mgr). + For(&kubelbv1alpha1.Route{}). + WithEventFilter(predicate.GenerationChangedPredicate{}). + Complete(r) +} diff --git a/internal/controllers/kubelb/suite_test.go b/internal/controllers/kubelb/suite_test.go index f02461d..e654227 100644 --- a/internal/controllers/kubelb/suite_test.go +++ b/internal/controllers/kubelb/suite_test.go @@ -116,7 +116,6 @@ var _ = BeforeSuite(func() { Client: k8sManager.GetClient(), Cache: k8sManager.GetCache(), Scheme: k8sManager.GetScheme(), - EnvoyBootstrap: envoyServer.GenerateBootstrap(), EnvoyProxyTopology: EnvoyProxyTopologyDedicated, Namespace: LBNamespace, PortAllocator: portAllocator, @@ -128,6 +127,8 @@ var _ = BeforeSuite(func() { Client: k8sManager.GetClient(), EnvoyCache: envoyServer.Cache, EnvoyProxyTopology: EnvoyProxyTopologyDedicated, + EnvoyBootstrap: envoyServer.GenerateBootstrap(), + Namespace: LBNamespace, PortAllocator: portAllocator, } err = ecpr.SetupWithManager(ctx, k8sManager) diff --git a/internal/envoy/resource.go b/internal/envoy/resource.go index becbd5b..4f4f7b3 100644 --- a/internal/envoy/resource.go +++ b/internal/envoy/resource.go @@ -17,6 +17,7 @@ limitations under the License.
package envoy import ( + "context" "fmt" "time" @@ -38,20 +39,43 @@ import ( "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/durationpb" - kubelbiov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" + kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1" "k8c.io/kubelb/internal/kubelb" portlookup "k8c.io/kubelb/internal/port-lookup" corev1 "k8s.io/api/core/v1" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) -func MapSnapshot(loadBalancers []kubelbiov1alpha1.LoadBalancer, portAllocator *portlookup.PortAllocator, globalEnvoyProxyTopology bool) (*envoycache.Snapshot, error) { +const ( + endpointAddressReferencePattern = "%s-address-%s" +) + +func MapSnapshot(ctx context.Context, client ctrlclient.Client, loadBalancers []kubelbv1alpha1.LoadBalancer, routes []kubelbv1alpha1.Route, portAllocator *portlookup.PortAllocator, globalEnvoyProxyTopology bool) (*envoycache.Snapshot, error) { var listener []types.Resource var cluster []types.Resource + addressesMap := make(map[string][]kubelbv1alpha1.EndpointAddress) for _, lb := range loadBalancers { // multiple endpoints represent multiple clusters for i, lbEndpoint := range lb.Spec.Endpoints { + if lbEndpoint.AddressesReference != nil { + // Check if map already contains the key + if val, ok := addressesMap[fmt.Sprintf(endpointAddressReferencePattern, lb.Namespace, lbEndpoint.AddressesReference.Name)]; ok { + lb.Spec.Endpoints[i].Addresses = val + continue + } + + // Load addresses from reference + var addresses kubelbv1alpha1.Addresses + if err := client.Get(ctx, ctrlclient.ObjectKey{Namespace: lb.Namespace, Name: lbEndpoint.AddressesReference.Name}, &addresses); err != nil { + return nil, fmt.Errorf("failed to get addresses: %w", err) + } + addressesMap[fmt.Sprintf(endpointAddressReferencePattern, lb.Namespace, lbEndpoint.AddressesReference.Name)] = addresses.Spec.Addresses + lb.Spec.Endpoints[i].Addresses = addresses.Spec.Addresses + lbEndpoint.Addresses = addresses.Spec.Addresses + } + for _, lbEndpointPort := range lbEndpoint.Ports { var lbEndpoints []*envoyEndpoint.LbEndpoint key := fmt.Sprintf(kubelb.EnvoyResourceIdentifierPattern, lb.Namespace, lb.Name, i, lbEndpointPort.Port, lbEndpointPort.Protocol) @@ -80,6 +104,56 @@ func MapSnapshot(loadBalancers []kubelbiov1alpha1.LoadBalancer, portAllocator *p } } + for _, route := range routes { + if route.Spec.Source.Kubernetes == nil { + continue + } + for i, routeendpoint := range route.Spec.Endpoints { + if routeendpoint.AddressesReference != nil { + // Check if map already contains the key + if val, ok := addressesMap[fmt.Sprintf(endpointAddressReferencePattern, route.Namespace, routeendpoint.AddressesReference.Name)]; ok { + route.Spec.Endpoints[i].Addresses = val + continue + } + + // Load addresses from reference + var addresses kubelbv1alpha1.Addresses + if err := client.Get(ctx, ctrlclient.ObjectKey{Namespace: route.Namespace, Name: routeendpoint.AddressesReference.Name}, &addresses); err != nil { + return nil, fmt.Errorf("failed to get addresses: %w", err) + } + addressesMap[fmt.Sprintf(endpointAddressReferencePattern, route.Namespace, routeendpoint.AddressesReference.Name)] = addresses.Spec.Addresses + route.Spec.Endpoints[i].Addresses = addresses.Spec.Addresses + } + } + source := route.Spec.Source.Kubernetes + for _, svc := range source.Services { + endpointKey := fmt.Sprintf(kubelb.EnvoyEndpointRoutePattern, route.Namespace, svc.Namespace, svc.Name) + for _, port := range svc.Spec.Ports { + portLookupKey := 
fmt.Sprintf(kubelb.EnvoyListenerPattern, port.Port, port.Protocol) + var lbEndpoints []*envoyEndpoint.LbEndpoint + for _, address := range route.Spec.Endpoints { + for _, routeEndpoints := range address.Addresses { + lbEndpoints = append(lbEndpoints, makeEndpoint(routeEndpoints.IP, uint32(port.NodePort))) + } + } + + listenerPort := uint32(port.Port) + if value, exists := portAllocator.Lookup(endpointKey, portLookupKey); exists { + listenerPort = uint32(value) + } + + key := fmt.Sprintf(kubelb.EnvoyRoutePortIdentifierPattern, route.Namespace, svc.Namespace, svc.Name, svc.UID, port.Port, port.Protocol) + + if port.Protocol == corev1.ProtocolTCP { + listener = append(listener, makeTCPListener(key, key, listenerPort)) + } else if port.Protocol == corev1.ProtocolUDP { + listener = append(listener, makeUDPListener(key, key, listenerPort)) + } + cluster = append(cluster, makeCluster(key, lbEndpoints)) + } + } + } + var content []byte var resources []types.Resource resources = append(resources, cluster...) diff --git a/internal/kubelb/client.go b/internal/kubelb/client.go deleted file mode 100644 index a50043a..0000000 --- a/internal/kubelb/client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2020 The KubeLB Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubelb - -import ( - "os" - "path/filepath" - - "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Client for the KubeLb kubernetes Custer. -func NewClient(kubeConfPath string) (client.Client, error) { - var kubeconfig string - if kubeConfPath == "" { - kubeconfig = filepath.Join( - os.Getenv("HOME"), ".kube", "kubelb", - ) - } else { - kubeconfig = kubeConfPath - } - - restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - return nil, err - } - - return client.New(restConfig, client.Options{}) -} diff --git a/internal/kubelb/endpoints.go b/internal/kubelb/endpoints.go deleted file mode 100644 index ebcf9f4..0000000 --- a/internal/kubelb/endpoints.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2020 The KubeLB Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package kubelb
-
-import corev1 "k8s.io/api/core/v1"
-
-type Endpoints struct {
-	ClusterEndpoints    []string
-	EndpointAddressType corev1.NodeAddressType
-}
-
-func (r *Endpoints) GetEndpoints(nodes *corev1.NodeList) []string {
-	var clusterEndpoints []string
-	for _, node := range nodes.Items {
-		var internalIP string
-		for _, address := range node.Status.Addresses {
-			if address.Type == r.EndpointAddressType {
-				internalIP = address.Address
-			}
-		}
-		clusterEndpoints = append(clusterEndpoints, internalIP)
-	}
-	return clusterEndpoints
-}
-
-func (r *Endpoints) EndpointIsDesiredState(desired *corev1.NodeList) bool {
-	desiredEndpoints := r.GetEndpoints(desired)
-	if len(r.ClusterEndpoints) != len(desiredEndpoints) {
-		return false
-	}
-
-	for _, endpoint := range desiredEndpoints {
-		if !contains(r.ClusterEndpoints, endpoint) {
-			return false
-		}
-	}
-
-	return true
-}
-
-func contains(s []string, e string) bool {
-	for _, a := range s {
-		if a == e {
-			return true
-		}
-	}
-	return false
-}
diff --git a/internal/kubelb/loadbalancer.go b/internal/kubelb/loadbalancer.go
index 1df3506..0d368b9 100644
--- a/internal/kubelb/loadbalancer.go
+++ b/internal/kubelb/loadbalancer.go
@@ -25,7 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-func MapLoadBalancer(userService *corev1.Service, clusterEndpoints []string, clusterName string) *kubelbiov1alpha1.LoadBalancer {
+func MapLoadBalancer(userService *corev1.Service, clusterEndpoints []string, useAddressesReference bool, clusterName string) *kubelbiov1alpha1.LoadBalancer {
 	var lbServicePorts []kubelbiov1alpha1.LoadBalancerPort
 	var lbEndpointSubsets []kubelbiov1alpha1.LoadBalancerEndpoints
 	var lbEndpointPorts []kubelbiov1alpha1.EndpointPort
@@ -45,17 +45,25 @@ func MapLoadBalancer(userService *corev1.Service, clusterEndpoints []string, clu
 		})
 	}

-	var endpointAddresses []kubelbiov1alpha1.EndpointAddress
-	for _, endpoint := range clusterEndpoints {
-		endpointAddresses = append(endpointAddresses, kubelbiov1alpha1.EndpointAddress{
-			IP: endpoint,
-		})
+	lbEndpoints := kubelbiov1alpha1.LoadBalancerEndpoints{
+		Ports: lbEndpointPorts,
+	}
+
+	if useAddressesReference {
+		lbEndpoints.AddressesReference = &corev1.ObjectReference{
+			Name: kubelbiov1alpha1.DefaultAddressName,
+		}
+	} else {
+		var endpointAddresses []kubelbiov1alpha1.EndpointAddress
+		for _, endpoint := range clusterEndpoints {
+			endpointAddresses = append(endpointAddresses, kubelbiov1alpha1.EndpointAddress{
+				IP: endpoint,
+			})
+		}
+		lbEndpoints.Addresses = endpointAddresses
 	}

-	lbEndpointSubsets = append(lbEndpointSubsets, kubelbiov1alpha1.LoadBalancerEndpoints{
-		Addresses: endpointAddresses,
-		Ports:     lbEndpointPorts,
-	})
+	lbEndpointSubsets = append(lbEndpointSubsets, lbEndpoints)

 	return &kubelbiov1alpha1.LoadBalancer{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/internal/kubelb/utils.go b/internal/kubelb/utils.go
index 52f8917..9da00e3 100644
--- a/internal/kubelb/utils.go
+++ b/internal/kubelb/utils.go
@@ -16,6 +16,12 @@ limitations under the License.
 package kubelb

+import (
+	"fmt"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
 // TODO(waleed): Rename to origin-namespace
 const LabelOriginNamespace = "kubelb.k8c.io/origin-ns"
 const LabelOriginName = "kubelb.k8c.io/origin-name"
@@ -36,4 +42,45 @@ const LabelAppKubernetesManagedBy = "app.kubernetes.io/managed-by" // helm
 const EnvoyResourceIdentifierPattern = "%s-%s-ep-%d-port-%d-%s"
 const EnvoyEndpointPattern = "%s-%s-ep-%d"
-const EnvoyListenerPattern = "%d-%s"
+const EnvoyEndpointRoutePattern = "tenant-%s-route-%s-%s"
+const EnvoyRoutePortIdentifierPattern = "tenant-%s-route-%s-%s-svc-%s-port-%d-%s"
+const EnvoyListenerPattern = "%v-%s"
+const RouteServiceMapKey = "%s/%s"
+const DefaultRouteStatus = "{}"
+
+const NameSuffixLength = 4
+
+func GenerateName(useUID bool, uid, name, namespace string) string {
+	if useUID {
+		return uid
+	}
+
+	output := fmt.Sprintf("%s-%s", namespace, name)
+	// If the output exceeds 63 characters (the Kubernetes object name limit), truncate it and append a UID-derived suffix so the result is unique and exactly 63 characters long.
+	if len(output) > 63 {
+		output = output[:63-NameSuffixLength-1]
+		output = fmt.Sprintf("%s-%s", output, uid[len(uid)-NameSuffixLength:])
+	}
+
+	return output
+}
+
+func GetName(obj client.Object) string {
+	name := obj.GetName()
+	if labels := obj.GetLabels(); labels != nil {
+		if v, ok := labels[LabelOriginName]; ok {
+			name = v
+		}
+	}
+	return name
+}
+
+func GetNamespace(obj client.Object) string {
+	namespace := obj.GetNamespace()
+	if labels := obj.GetLabels(); labels != nil {
+		if v, ok := labels[LabelOriginNamespace]; ok {
+			namespace = v
+		}
+	}
+	return namespace
+}
diff --git a/internal/port-lookup/allocator.go b/internal/port-lookup/allocator.go
index 5404407..5e23f74 100644
--- a/internal/port-lookup/allocator.go
+++ b/internal/port-lookup/allocator.go
@@ -22,7 +22,7 @@ import (
 	"math/rand"
 	"sync"

-	kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
+	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
 	"k8c.io/kubelb/internal/kubelb"

 	"k8s.io/utils/strings/slices"
@@ -56,7 +56,6 @@ func NewPortAllocator() *PortAllocator {
 func (pa *PortAllocator) GetPortLookupTable() LookupTable {
 	return pa.portLookup
 }
-
 func (pa *PortAllocator) Lookup(endpointKey, portKey string) (int, bool) {
 	if endpointLookup, exists := pa.portLookup[endpointKey]; exists {
 		if port, exists := endpointLookup[portKey]; exists {
@@ -151,7 +150,7 @@ func (pa *PortAllocator) LoadState(ctx context.Context, apiReader client.Reader)
 	lookupTable := make(LookupTable)

 	// We use the API reader here because the cache may not be fully synced yet.
-	loadBalancers := &kubelbk8ciov1alpha1.LoadBalancerList{}
+	loadBalancers := &kubelbv1alpha1.LoadBalancerList{}
 	err := apiReader.List(ctx, loadBalancers)
 	if err != nil {
 		return err
@@ -177,6 +176,34 @@ func (pa *PortAllocator) LoadState(ctx context.Context, apiReader client.Reader)
 		}
 	}

+	routes := &kubelbv1alpha1.RouteList{}
+	err = apiReader.List(ctx, routes)
+	if err != nil {
+		return err
+	}
+
+	for _, route := range routes.Items {
+		if route.Spec.Source.Kubernetes == nil {
+			continue
+		}
+		for _, svc := range route.Spec.Source.Kubernetes.Services {
+			endpointKey := fmt.Sprintf(kubelb.EnvoyEndpointRoutePattern, route.Namespace, svc.Namespace, svc.Name)
+			if _, exists := lookupTable[endpointKey]; !exists {
+				lookupTable[endpointKey] = make(map[string]int)
+			}
+
+			// The assigned port is stored in the status of the service.
+			if route.Status.Resources.Services != nil {
+				key := fmt.Sprintf(kubelb.RouteServiceMapKey, svc.Namespace, kubelb.GetName(&svc))
+				if svcPort, exists := route.Status.Resources.Services[key]; exists {
+					for _, port := range svcPort.Ports {
+						portKey := fmt.Sprintf(kubelb.EnvoyListenerPattern, port.Port, port.Protocol)
+						lookupTable[endpointKey][portKey] = port.TargetPort.IntValue()
+					}
+				}
+			}
+		}
+	}
+
 	pa.portLookup = lookupTable
 	pa.recomputeAvailablePorts()
 	return nil
diff --git a/internal/port-lookup/mapper.go b/internal/port-lookup/mapper.go
index 3c02610..cb13427 100644
--- a/internal/port-lookup/mapper.go
+++ b/internal/port-lookup/mapper.go
@@ -19,12 +19,12 @@ package portlookup
 import (
 	"fmt"

-	kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
+	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
 	"k8c.io/kubelb/internal/kubelb"
 )

 // AllocatePortsForLoadBalancers allocates ports to the given load balancers. If a port is already allocated, it will be skipped.
-func (pa *PortAllocator) AllocatePortsForLoadBalancers(loadBalancers kubelbk8ciov1alpha1.LoadBalancerList) error {
+func (pa *PortAllocator) AllocatePortsForLoadBalancers(loadBalancers kubelbv1alpha1.LoadBalancerList) error {
 	for _, lb := range loadBalancers.Items {
 		for i, lbEndpoint := range lb.Spec.Endpoints {
 			endpointKey := fmt.Sprintf(kubelb.EnvoyEndpointPattern, lb.Namespace, lb.Name, i)
@@ -40,8 +40,43 @@ func (pa *PortAllocator) AllocatePortsForLoadBalancers(loadBalancers kubelbk8cio
 	return nil
 }

+// AllocatePortsForRoutes allocates ports for the given routes. If a port is already allocated, it will be skipped.
+func (pa *PortAllocator) AllocatePortsForRoutes(routes []kubelbv1alpha1.Route) error {
+	for _, route := range routes {
+		if route.Spec.Source.Kubernetes == nil {
+			continue
+		}
+
+		for _, svc := range route.Spec.Source.Kubernetes.Services {
+			endpointKey := fmt.Sprintf(kubelb.EnvoyEndpointRoutePattern, route.Namespace, svc.Namespace, svc.Name)
+			var keys []string
+			for _, svcPort := range svc.Spec.Ports {
+				keys = append(keys, fmt.Sprintf(kubelb.EnvoyListenerPattern, svcPort.Port, svcPort.Protocol))
+			}
+			// If a port is already allocated, it will be skipped.
+			pa.AllocatePorts(endpointKey, keys)
+		}
+	}
+	return nil
+}
+
+// DeallocatePortsForRoute deallocates ports for the given route.
+func (pa *PortAllocator) DeallocatePortsForRoute(route kubelbv1alpha1.Route) error {
+	if route.Spec.Source.Kubernetes == nil {
+		return nil
+	}
+
+	var keys []string
+	for _, svc := range route.Spec.Source.Kubernetes.Services {
+		keys = append(keys, fmt.Sprintf(kubelb.EnvoyEndpointRoutePattern, route.Namespace, svc.Namespace, svc.Name))
+	}
+
+	pa.DeallocateEndpoints(keys)
+	return nil
+}
+
 // DeallocatePortsForLoadBalancer deallocates ports against the given load balancer.
-func (pa *PortAllocator) DeallocatePortsForLoadBalancer(loadBalancer kubelbk8ciov1alpha1.LoadBalancer) error {
+func (pa *PortAllocator) DeallocatePortsForLoadBalancer(loadBalancer kubelbv1alpha1.LoadBalancer) error {
 	var endpointKeys []string
 	for i := range loadBalancer.Spec.Endpoints {
diff --git a/internal/resources/ingress/ingress.go b/internal/resources/ingress/ingress.go
new file mode 100644
index 0000000..643b5a3
--- /dev/null
+++ b/internal/resources/ingress/ingress.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2024 The KubeLB Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package route
+
+import (
+	networkingv1 "k8s.io/api/networking/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// GetServicesFromIngress returns a list of services referenced by the given Ingress.
+func GetServicesFromIngress(ingress networkingv1.Ingress) []types.NamespacedName {
+	serviceReferences := make([]types.NamespacedName, 0)
+	for _, rule := range ingress.Spec.Rules {
+		if rule.HTTP == nil {
+			continue
+		}
+		for _, path := range rule.HTTP.Paths {
+			if path.Backend.Service == nil {
+				continue
+			}
+			serviceReferences = append(serviceReferences, types.NamespacedName{
+				Name:      path.Backend.Service.Name,
+				Namespace: ingress.Namespace,
+			})
+		}
+	}
+
+	if ingress.Spec.DefaultBackend != nil && ingress.Spec.DefaultBackend.Service != nil {
+		serviceReferences = append(serviceReferences, types.NamespacedName{
+			Name:      ingress.Spec.DefaultBackend.Service.Name,
+			Namespace: ingress.Namespace,
+		})
+	}
+
+	// Remove duplicates from the list.
+	keys := make(map[types.NamespacedName]bool)
+	list := []types.NamespacedName{}
+	for _, entry := range serviceReferences {
+		if !keys[entry] {
+			keys[entry] = true
+			list = append(list, entry)
+		}
+	}
+	return list
+}
diff --git a/internal/resources/route/route.go b/internal/resources/route/route.go
index a8018e8..d0a3e72 100644
--- a/internal/resources/route/route.go
+++ b/internal/resources/route/route.go
@@ -23,7 +23,7 @@ import (

 	"github.com/go-logr/logr"

-	kubelbk8ciov1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
+	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
 	"k8c.io/kubelb/internal/kubelb"

 	corev1 "k8s.io/api/core/v1"
@@ -45,8 +45,8 @@ func CreateRouteForResource(ctx context.Context, _ logr.Logger, client ctrlclien
 	return CreateUpdateRoute(ctx, client, generateRoute)
 }

-func GenerateRoute(resource unstructured.Unstructured, resources Subresources, namespace string) kubelbk8ciov1alpha1.Route {
-	return kubelbk8ciov1alpha1.Route{
+func GenerateRoute(resource unstructured.Unstructured, resources Subresources, namespace string) kubelbv1alpha1.Route {
+	return kubelbv1alpha1.Route{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      string(resource.GetUID()),
 			Namespace: namespace,
@@ -57,20 +57,29 @@ func GenerateRoute(resource unstructured.Unstructured, resources Subresources, n
 				kubelb.LabelManagedBy: kubelb.LabelControllerName,
 			},
 		},
-		Spec: kubelbk8ciov1alpha1.RouteSpec{
-			Source: kubelbk8ciov1alpha1.RouteSource{
-				Kubernetes: &kubelbk8ciov1alpha1.KubernetesSource{
+		Spec: kubelbv1alpha1.RouteSpec{
+			// TODO(waleed): Once we have everything in place, figure out what this should look like.
+			Endpoints: []kubelbv1alpha1.LoadBalancerEndpoints{
+				{
+					Name: "default",
+					AddressesReference: &corev1.ObjectReference{
+						Name: kubelbv1alpha1.DefaultAddressName,
+					},
+				},
+			},
+			Source: kubelbv1alpha1.RouteSource{
+				Kubernetes: &kubelbv1alpha1.KubernetesSource{
 					Route: resource,
-					Services:        kubelbk8ciov1alpha1.ConvertServicesToUpstreamServices(resources.Services),
-					ReferenceGrants: kubelbk8ciov1alpha1.ConvertReferenceGrantsToUpstreamReferenceGrants(resources.ReferenceGrants),
+					Services:        kubelbv1alpha1.ConvertServicesToUpstreamServices(resources.Services),
+					ReferenceGrants: kubelbv1alpha1.ConvertReferenceGrantsToUpstreamReferenceGrants(resources.ReferenceGrants),
 				},
 			},
 		},
 	}
 }

-func CreateUpdateRoute(ctx context.Context, client ctrlclient.Client, route kubelbk8ciov1alpha1.Route) error {
-	existingRoute := kubelbk8ciov1alpha1.Route{}
+func CreateUpdateRoute(ctx context.Context, client ctrlclient.Client, route kubelbv1alpha1.Route) error {
+	existingRoute := kubelbv1alpha1.Route{}
 	err := client.Get(ctx, types.NamespacedName{Name: route.Name, Namespace: route.Namespace}, &existingRoute)
 	if err != nil && !errors.IsNotFound(err) {
 		return fmt.Errorf("failed to get Route from LB cluster: %w", err)
diff --git a/internal/resources/service/service.go b/internal/resources/service/service.go
index ae43ebe..db4bc1b 100644
--- a/internal/resources/service/service.go
+++ b/internal/resources/service/service.go
@@ -23,12 +23,14 @@ import (

 	"github.com/go-logr/logr"

 	"k8c.io/kubelb/internal/kubelb"
+	portlookup "k8c.io/kubelb/internal/port-lookup"
+	"k8c.io/reconciler/pkg/equality"

 	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	ctrl "sigs.k8s.io/controller-runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 )

@@ -75,6 +77,7 @@ func NormalizeAndReplicateServices(ctx context.Context, log logr.Logger, client
 	}
 	service.Labels[kubelb.LabelOriginNamespace] = service.Namespace
 	service.Labels[kubelb.LabelOriginName] = service.Name
+	service.Labels[kubelb.LabelManagedBy] = kubelb.LabelControllerName

 	service.Name = fmt.Sprintf(NodePortServicePattern, service.Name)
 	servicesToCreateUpdate = append(servicesToCreateUpdate, *service)
@@ -83,9 +86,7 @@ func NormalizeAndReplicateServices(ctx context.Context, log logr.Logger, client
 	for _, svc := range servicesToCreateUpdate {
 		log.V(4).Info("Creating/Updating service", "name", svc.Name, "namespace", svc.Namespace)

-		if _, err := ctrl.CreateOrUpdate(ctx, client, &svc, func() error {
-			return nil
-		}); err != nil {
+		if err := CreateOrUpdateService(ctx, client, &svc); err != nil {
 			return nil, fmt.Errorf("failed to create or update Service: %w", err)
 		}
 	}
@@ -106,6 +107,84 @@ func NormalizeAndReplicateServices(ctx context.Context, log logr.Logger, client
 	return services, nil
 }

+func GenerateServiceForLBCluster(service corev1.Service, appName, namespace string, portAllocator *portlookup.PortAllocator) corev1.Service {
+	endpointKey := fmt.Sprintf(kubelb.EnvoyEndpointRoutePattern, namespace, service.Namespace, service.Name)
+
+	service.Name = kubelb.GenerateName(false, string(service.UID), GetServiceName(service), service.Namespace)
+	service.Namespace = namespace
+	service.UID = ""
+	if service.Spec.Type == corev1.ServiceTypeNodePort {
+		service.Spec.Type = corev1.ServiceTypeClusterIP
+	}
+
+	// Use the nodePort(s) assigned as the targetPort for the new service. This is required to route traffic back to the actual service.
+	for i, port := range service.Spec.Ports {
+		portKey := fmt.Sprintf(kubelb.EnvoyListenerPattern, port.Port, port.Protocol)
+		targetPort := port.NodePort
+		if value, exists := portAllocator.Lookup(endpointKey, portKey); exists {
+			targetPort = int32(value)
+		}
+
+		port.TargetPort = intstr.FromInt(int(targetPort))
+		port.NodePort = 0
+		service.Spec.Ports[i] = port
+	}
+
+	// Replace the selector with the envoy proxy selector.
+	service.Spec.Selector = map[string]string{
+		kubelb.LabelAppKubernetesName: appName,
+	}
+
+	return service
+}
+
+func CreateOrUpdateService(ctx context.Context, client ctrlclient.Client, obj *corev1.Service) error {
+	key := ctrlclient.ObjectKey{Namespace: obj.Namespace, Name: obj.Name}
+	existingObj := &corev1.Service{}
+	if err := client.Get(ctx, key, existingObj); err != nil {
+		if !kerrors.IsNotFound(err) {
+			return fmt.Errorf("failed to get Service: %w", err)
+		}
+		err := client.Create(ctx, obj)
+		if err != nil {
+			return fmt.Errorf("failed to create Service: %w", err)
+		}
+		return nil
+	}
+
+	// Update the Service object only if it differs from the existing one.
+	if equality.Semantic.DeepEqual(existingObj.Spec, obj.Spec) &&
+		equality.Semantic.DeepEqual(existingObj.Labels, obj.Labels) &&
+		equality.Semantic.DeepEqual(existingObj.Annotations, obj.Annotations) {
+		return nil
+	}
+
+	if err := client.Update(ctx, obj); err != nil {
+		return fmt.Errorf("failed to update Service: %w", err)
+	}
+	return nil
+}
+
+func GetServiceName(service corev1.Service) string {
+	name := service.Name
+	if labels := service.Labels; labels != nil {
+		if v, ok := labels[kubelb.LabelOriginName]; ok {
+			name = v
+		}
+	}
+	return name
+}
+
+func GetServiceNamespace(service corev1.Service) string {
+	namespace := service.Namespace
+	if labels := service.Labels; labels != nil {
+		if v, ok := labels[kubelb.LabelOriginNamespace]; ok {
+			namespace = v
+		}
+	}
+	return namespace
+}
+
 func cleanseService(svc corev1.Service, removeUID, removeClusterSpecificFields bool) *corev1.Service {
 	obj := &corev1.Service{
 		TypeMeta: svc.TypeMeta,
diff --git a/internal/resources/unstructured/unstructured.go b/internal/resources/unstructured/unstructured.go
index 9d0bc63..8cba131 100644
--- a/internal/resources/unstructured/unstructured.go
+++ b/internal/resources/unstructured/unstructured.go
@@ -20,6 +20,7 @@ import (
 	"fmt"

 	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -35,6 +36,7 @@ func NormalizeUnstructured(obj *unstructured.Unstructured) *unstructured.Unstruc
 	clone.SetLabels(obj.GetLabels())
 	clone.SetAnnotations(obj.GetAnnotations())
 	clone.SetUID(obj.GetUID())
+	clone.SetOwnerReferences(nil)

 	clone.Object["spec"] = obj.Object["spec"]

@@ -45,10 +47,30 @@ func NormalizeUnstructured(obj *unstructured.Unstructured) *unstructured.Unstruc
 	return clone
 }

-func ConverObjectToUnstructured(object client.Object) (*unstructured.Unstructured, error) {
+func ConvertObjectToUnstructured(object client.Object) (*unstructured.Unstructured, error) {
 	unstruct, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object)
 	if err != nil {
 		return nil, fmt.Errorf("failed to convert resource to unstructured: %w", err)
 	}
 	return &unstructured.Unstructured{Object: unstruct}, nil
 }
+
+func ConvertUnstructuredToObject(unstruct *unstructured.Unstructured) (client.Object, error) {
+	var object client.Object
+	gvk := unstruct.GetObjectKind().GroupVersionKind()
+
+	switch gvk {
+	case networkingv1.SchemeGroupVersion.WithKind("Ingress"):
+		object = &networkingv1.Ingress{}
+	case corev1.SchemeGroupVersion.WithKind("Service"):
+		object = &corev1.Service{}
+	default:
+		return nil, fmt.Errorf("unsupported GroupVersionKind %q", gvk.String())
+	}
+
+	err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstruct.UnstructuredContent(), object)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert unstructured to resource: %w", err)
+	}
+	return object, nil
+}
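
A note on the Addresses indirection introduced above: the CCM is expected to publish the tenant's node IPs once into a shared Addresses object (named via DefaultAddressName), and LoadBalancer or Route endpoints then carry only an AddressesReference that MapSnapshot resolves and caches per namespace. Below is a minimal sketch of both halves against the new API types; the `example` package name and the `publishAddresses` helper are illustrative assumptions, not part of this patch.

package example

import (
	"context"

	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// publishAddresses creates the shared Addresses object for a tenant namespace
// (hypothetical helper; in the PR the CCM owns this responsibility).
func publishAddresses(ctx context.Context, c ctrlclient.Client, namespace string, nodeIPs []string) error {
	addresses := &kubelbv1alpha1.Addresses{
		ObjectMeta: metav1.ObjectMeta{
			Name:      kubelbv1alpha1.DefaultAddressName,
			Namespace: namespace,
		},
	}
	for _, ip := range nodeIPs {
		addresses.Spec.Addresses = append(addresses.Spec.Addresses, kubelbv1alpha1.EndpointAddress{IP: ip})
	}
	return c.Create(ctx, addresses)
}

// referencedEndpoint mirrors what MapLoadBalancer produces when
// useAddressesReference is true: ports only, plus a reference that
// MapSnapshot later resolves against the Addresses object above.
func referencedEndpoint(ports []kubelbv1alpha1.EndpointPort) kubelbv1alpha1.LoadBalancerEndpoints {
	return kubelbv1alpha1.LoadBalancerEndpoints{
		Ports: ports,
		AddressesReference: &corev1.ObjectReference{
			Name: kubelbv1alpha1.DefaultAddressName,
		},
	}
}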
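
The truncation path in GenerateName is subtle enough to deserve a regression test: with the slice bound at 63-NameSuffixLength-1, a truncated name comes out at exactly 63 characters (58 for the prefix, one hyphen, and a 4-character UID suffix), which is the Kubernetes object-name limit. A sketch of such a test in the kubelb package; the cases are illustrative:

package kubelb

import (
	"strings"
	"testing"
)

func TestGenerateNameLength(t *testing.T) {
	uid := "6a8bca92-54f8-4b33-a3e7-7b6f9c1d2e30" // any UID-like string works
	tests := []struct {
		name      string
		namespace string
	}{
		{name: "short", namespace: "ns"},
		{name: strings.Repeat("a", 60), namespace: strings.Repeat("b", 20)},
	}
	for _, tc := range tests {
		out := GenerateName(false, uid, tc.name, tc.namespace)
		if len(out) > 63 {
			t.Errorf("GenerateName(%q, %q) = %q, length %d exceeds the 63-character object-name limit", tc.namespace, tc.name, out, len(out))
		}
	}
}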
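
Finally, the interplay between AllocatePortsForRoutes and Lookup is easiest to see end to end: MapSnapshot uses the looked-up port as the Envoy listener port, and GenerateServiceForLBCluster uses the same lookup for the service's targetPort, so both sides agree by construction. The sketch below assumes a Route whose Kubernetes source already carries services (as built by GenerateRoute) and that AllocatePorts assigns a port for every key; `example` is again an assumed package name.

package example

import (
	"fmt"

	kubelbv1alpha1 "k8c.io/kubelb/api/kubelb.k8c.io/v1alpha1"
	"k8c.io/kubelb/internal/kubelb"
	portlookup "k8c.io/kubelb/internal/port-lookup"
	corev1 "k8s.io/api/core/v1"
)

// lookupRoutePort shows the allocate-then-lookup round trip that MapSnapshot
// and GenerateServiceForLBCluster both rely on. It returns the listener port
// assigned to one service port of the route, if any.
func lookupRoutePort(route kubelbv1alpha1.Route, svcNamespace, svcName string, port int32, proto corev1.Protocol) (int, bool) {
	pa := portlookup.NewPortAllocator()
	if err := pa.AllocatePortsForRoutes([]kubelbv1alpha1.Route{route}); err != nil {
		return 0, false
	}

	// The keys must be built with the same patterns the allocator used.
	endpointKey := fmt.Sprintf(kubelb.EnvoyEndpointRoutePattern, route.Namespace, svcNamespace, svcName)
	portKey := fmt.Sprintf(kubelb.EnvoyListenerPattern, port, proto)
	return pa.Lookup(endpointKey, portKey)
}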