diff --git a/Makefile b/Makefile
index 2e6070d..89130a4 100644
--- a/Makefile
+++ b/Makefile
@@ -57,8 +57,9 @@ clean: ## Cleans up the generated resources
 	rm -rf .tmpvendor
 
 .PHONY: run
+RUN_TARGET ?= manager
 run: generate fmt vet ## Run a controller from your host.
-	go run ./main.go
+	go run ./main.go "-target=$(RUN_TARGET)"
 
 ###
 ### Assets
diff --git a/config/samples/machineset-cloudscale-known-working.yml b/config/samples/machineset-cloudscale-known-working.yml
new file mode 100644
index 0000000..3449f65
--- /dev/null
+++ b/config/samples/machineset-cloudscale-known-working.yml
@@ -0,0 +1,43 @@
+apiVersion: machine.openshift.io/v1beta1
+kind: MachineSet
+metadata:
+  name: app
+  namespace: openshift-machine-api
+  labels:
+    machine.openshift.io/cluster-api-cluster: c-appuio-lab-cloudscale-rma-0
+    name: app
+spec:
+  deletePolicy: Oldest
+  replicas: 0
+  selector:
+    matchLabels:
+      machine.openshift.io/cluster-api-cluster: c-appuio-lab-cloudscale-rma-0
+      machine.openshift.io/cluster-api-machineset: app
+  template:
+    metadata:
+      labels:
+        machine.openshift.io/cluster-api-cluster: c-appuio-lab-cloudscale-rma-0
+        machine.openshift.io/cluster-api-machine-role: app
+        machine.openshift.io/cluster-api-machine-type: app
+        machine.openshift.io/cluster-api-machineset: app
+    spec:
+      lifecycleHooks: {}
+      metadata:
+        labels:
+          node-role.kubernetes.io/app: ""
+          node-role.kubernetes.io/worker: ""
+      providerSpec:
+        value:
+          zone: rma1
+          baseDomain: lab-cloudscale-rma-0.appuio.cloud
+          flavor: flex-16-4
+          image: custom:rhcos-4.15
+          rootVolumeSizeGB: 100
+          antiAffinityKey: app
+          interfaces:
+            - type: Private
+              networkUUID: fd2b132d-f5d0-4024-b99f-68e5321ab4d1
+          userDataSecret:
+            name: cloudscale-user-data
+          tokenSecret:
+            name: cloudscale-rw-token
diff --git a/controllers/machine_api_controllers_controller.go b/controllers/machine_api_controllers_controller.go
new file mode 100644
index 0000000..57c5945
--- /dev/null
+++ b/controllers/machine_api_controllers_controller.go
@@ -0,0 +1,125 @@
+package controllers
+
+import (
+	"context"
+	_ "embed"
+	"encoding/json"
+	"fmt"
+
+	"github.com/google/go-jsonnet"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+const (
+	imagesConfigMapName            = "machine-api-operator-images"
+	originalUpstreamDeploymentName = "machine-api-controllers"
+	imageKey                       = "images.json"
+
+	caBundleConfigMapName = "appuio-machine-api-ca-bundle"
+)
+
+//go:embed machine_api_controllers_deployment.jsonnet
+var deploymentTemplate string
+
+// MachineAPIControllersReconciler creates an appuio-machine-api-controllers deployment based on the images.json ConfigMap
+// if the upstream machine-api-controllers deployment does not exist.
+type MachineAPIControllersReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+
+	Namespace string
+}
+
+func (r *MachineAPIControllersReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	if req.Name != imagesConfigMapName {
+		return ctrl.Result{}, nil
+	}
+
+	l := log.FromContext(ctx).WithName("UpstreamDeploymentReconciler.Reconcile")
+	l.Info("Reconciling")
+
+	var imageCM corev1.ConfigMap
+	if err := r.Get(ctx, req.NamespacedName, &imageCM); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	ij, ok := imageCM.Data[imageKey]
+	if !ok {
+		return ctrl.Result{}, fmt.Errorf("%q key not found in ConfigMap %q", imageKey, imagesConfigMapName)
+	}
+	images := make(map[string]string)
+	if err := json.Unmarshal([]byte(ij), &images); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to unmarshal %q from %q: %w", imageKey, imagesConfigMapName, err)
+	}
+
+	// Check that the original upstream deployment does not exist.
+	// If it does, we should not create the new deployment.
+	var upstreamDeployment appsv1.Deployment
+	err := r.Get(ctx, client.ObjectKey{
+		Name:      originalUpstreamDeploymentName,
+		Namespace: r.Namespace,
+	}, &upstreamDeployment)
+	if err == nil {
+		return ctrl.Result{}, fmt.Errorf("original upstream deployment %s already exists", originalUpstreamDeploymentName)
+	} else if !apierrors.IsNotFound(err) {
+		return ctrl.Result{}, fmt.Errorf("failed to check for original upstream deployment %s: %w", originalUpstreamDeploymentName, err)
+	}
+
+	vm, err := jsonnetVMWithContext(images)
+	if err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to create jsonnet VM: %w", err)
+	}
+
+	ud, err := vm.EvaluateAnonymousSnippet("controllers_deployment.jsonnet", deploymentTemplate)
+	if err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to evaluate jsonnet: %w", err)
+	}
+
+	// TODO(bastjan) this could be way more generic and support any kind of object.
+	// We don't need any other object types right now, so we're keeping it simple.
+	var toDeploy appsv1.Deployment
+	if err := json.Unmarshal([]byte(ud), &toDeploy); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to unmarshal jsonnet output: %w", err)
+	}
+	if toDeploy.APIVersion != "apps/v1" || toDeploy.Kind != "Deployment" {
+		return ctrl.Result{}, fmt.Errorf("expected Deployment, got %s/%s", toDeploy.APIVersion, toDeploy.Kind)
+	}
+	toDeploy.Namespace = r.Namespace
+	if err := controllerutil.SetControllerReference(&imageCM, &toDeploy, r.Scheme); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err)
+	}
+	if err := r.Client.Patch(ctx, &toDeploy, client.Apply, client.FieldOwner("upstream-deployment-controller")); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to apply Deployment %q: %w", toDeploy.GetName(), err)
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *MachineAPIControllersReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&corev1.ConfigMap{}).
+		Owns(&appsv1.Deployment{}).
+		Complete(r)
+}
+
+func jsonnetVMWithContext(images map[string]string) (*jsonnet.VM, error) {
+	jcr, err := json.Marshal(map[string]any{
+		"images": images,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("unable to marshal jsonnet context: %w", err)
+	}
+	jvm := jsonnet.MakeVM()
+	jvm.ExtCode("context", string(jcr))
+	// Don't allow imports
+	jvm.Importer(&jsonnet.MemoryImporter{})
+	return jvm, nil
+}
diff --git a/controllers/machine_api_controllers_controller_test.go b/controllers/machine_api_controllers_controller_test.go
new file mode 100644
index 0000000..24040b6
--- /dev/null
+++ b/controllers/machine_api_controllers_controller_test.go
@@ -0,0 +1,159 @@
+package controllers
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func Test_MachineAPIControllersReconciler_Reconcile(t *testing.T) {
+	t.Parallel()
+
+	ctx := context.Background()
+
+	const namespace = "openshift-machine-api"
+
+	scheme := runtime.NewScheme()
+	require.NoError(t, clientgoscheme.AddToScheme(scheme))
+
+	images := map[string]string{
+		"machineAPIOperator": "registry.io/machine-api-operator:v1.0.0",
+		"kubeRBACProxy":      "registry.io/kube-rbac-proxy:v1.0.0",
+	}
+	imagesJSON, err := json.Marshal(images)
+	require.NoError(t, err)
+
+	ucm := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      imagesConfigMapName,
+			Namespace: namespace,
+		},
+		Data: map[string]string{
+			imageKey: string(imagesJSON),
+		},
+	}
+
+	c := &fakeSSA{
+		fake.NewClientBuilder().
+			WithScheme(scheme).
+			WithRuntimeObjects(ucm).
+			Build(),
+	}
+
+	r := &MachineAPIControllersReconciler{
+		Client: c,
+		Scheme: scheme,
+
+		Namespace: namespace,
+	}
+
+	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(ucm)})
+	require.NoError(t, err)
+
+	var deployment appsv1.Deployment
+	require.NoError(t, c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: "appuio-" + originalUpstreamDeploymentName}, &deployment))
+
+	assert.Equal(t, "system-node-critical", deployment.Spec.Template.Spec.PriorityClassName)
+	for _, c := range deployment.Spec.Template.Spec.Containers {
+		if c.Image == images["machineAPIOperator"] || c.Image == images["kubeRBACProxy"] {
+			continue
+		}
+		t.Errorf("expected image %q or %q, got %q", images["machineAPIOperator"], images["kubeRBACProxy"], c.Image)
+	}
+}
+
+func Test_MachineAPIControllersReconciler_OriginalDeploymentExists(t *testing.T) {
+	t.Parallel()
+
+	ctx := context.Background()
+
+	const namespace = "openshift-machine-api"
+
+	scheme := runtime.NewScheme()
+	require.NoError(t, clientgoscheme.AddToScheme(scheme))
+
+	images := map[string]string{
+		"machineAPIOperator": "registry.io/machine-api-operator:v1.0.0",
+		"kubeRBACProxy":      "registry.io/kube-rbac-proxy:v1.0.0",
+	}
+	imagesJSON, err := json.Marshal(images)
+	require.NoError(t, err)
+
+	ucm := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      imagesConfigMapName,
+			Namespace: namespace,
+		},
+		Data: map[string]string{
+			imageKey: string(imagesJSON),
+		},
+	}
+
+	origDeploy := &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      originalUpstreamDeploymentName,
+			Namespace: namespace,
+		},
+	}
+
+	c := &fakeSSA{
+		fake.NewClientBuilder().
+			WithScheme(scheme).
+			WithRuntimeObjects(ucm, origDeploy).
+			Build(),
+	}
+
+	r := &MachineAPIControllersReconciler{
+		Client: c,
+		Scheme: scheme,
+
+		Namespace: namespace,
+	}
+
+	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(ucm)})
+	require.ErrorContains(t, err, "machine-api-controllers already exists")
+}
+
+// fakeSSA is a fake client that approximates SSA.
+// It creates objects that don't exist yet and _updates_ them if they exist.
+// This is completely kaputt since the object is overwritten with the new object.
+// See https://github.com/kubernetes-sigs/controller-runtime/issues/2341
+type fakeSSA struct {
+	client.WithWatch
+}
+
+// Patch approximates SSA by creating objects that don't exist yet.
+func (f *fakeSSA) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
+	// Apply patches are supposed to upsert, but the fake client fails if the object doesn't exist;
+	// if an apply patch occurs for an object that doesn't yet exist, create it.
+	if patch.Type() != types.ApplyPatchType {
+		return f.WithWatch.Patch(ctx, obj, patch, opts...)
+	}
+	check, ok := obj.DeepCopyObject().(client.Object)
+	if !ok {
+		return errors.New("could not check for object in fake client")
+	}
+	if err := f.WithWatch.Get(ctx, client.ObjectKeyFromObject(obj), check); apierrors.IsNotFound(err) {
+		if err := f.WithWatch.Create(ctx, check); err != nil {
+			return fmt.Errorf("could not inject object creation for fake: %w", err)
+		}
+	} else if err != nil {
+		return fmt.Errorf("could not check for object in fake client: %w", err)
+	}
+	return f.WithWatch.Update(ctx, obj)
+}
diff --git a/controllers/machine_api_controllers_deployment.jsonnet b/controllers/machine_api_controllers_deployment.jsonnet
new file mode 100644
index 0000000..3b6a348
--- /dev/null
+++ b/controllers/machine_api_controllers_deployment.jsonnet
@@ -0,0 +1,303 @@
+local context = std.extVar('context');
+
+local controllerImage = context.images.machineAPIOperator;
+local rbacProxyImage = context.images.kubeRBACProxy;
+
+local kubeProxyContainer = function(upstreamPort, portName, exposePort) {
+  args: [
+    '--secure-listen-address=0.0.0.0:%s' % exposePort,
+    '--upstream=http://localhost:%s' % upstreamPort,
+    '--config-file=/etc/kube-rbac-proxy/config-file.yaml',
+    '--tls-cert-file=/etc/tls/private/tls.crt',
+    '--tls-private-key-file=/etc/tls/private/tls.key',
+    '--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305',
+    '--logtostderr=true',
+    '--v=3',
+  ],
+  image: rbacProxyImage,
+  imagePullPolicy: 'IfNotPresent',
+  name: 'kube-rbac-proxy-%s' % portName,
+  ports: [
+    {
+      containerPort: exposePort,
+      name: portName,
+      protocol: 'TCP',
+    },
+  ],
+  resources: {
+    requests: {
+      cpu: '10m',
+      memory: '20Mi',
+    },
+  },
+  terminationMessagePath: '/dev/termination-log',
+  terminationMessagePolicy: 'File',
+  volumeMounts: [
+    {
+      mountPath: '/etc/kube-rbac-proxy',
+      name: 'config',
+    },
+    {
+      mountPath: '/etc/tls/private',
+      name: 'machine-api-controllers-tls',
+    },
+  ],
+};
+
+
+local controllersDeployment = {
+  apiVersion: 'apps/v1',
+  kind: 'Deployment',
+  metadata: {
+    annotations: {},
+    labels: {
+      api: 'clusterapi',
+      'k8s-app': 'controller',
+    },
+    name: 'appuio-machine-api-controllers',
+  },
+  spec: {
+    progressDeadlineSeconds: 600,
+    replicas: 1,
+    revisionHistoryLimit: 10,
+    selector: {
+      matchLabels: {
+        api: 'clusterapi',
+        'k8s-app': 'controller',
+      },
+    },
+    strategy: {
+      rollingUpdate: {
+        maxSurge: '25%',
+        maxUnavailable: '25%',
+      },
+      type: 'RollingUpdate',
+    },
+    template: {
+      metadata: {
+        annotations: {
+          'target.workload.openshift.io/management': '{"effect": "PreferredDuringScheduling"}',
+        },
+        creationTimestamp: null,
+        labels: {
+          api: 'clusterapi',
+          'k8s-app': 'controller',
+        },
+      },
+      spec: {
+        containers: [
+          {
+            args: [
+              '--logtostderr=true',
+              '--v=3',
+              '--leader-elect=true',
+              '--leader-elect-lease-duration=120s',
+              '--namespace=openshift-machine-api',
+            ],
+            command: [
+              '/machineset-controller',
+            ],
+            image: controllerImage,
+            imagePullPolicy: 'IfNotPresent',
+            livenessProbe: {
+              failureThreshold: 3,
+              httpGet: {
+                path: '/readyz',
+                port: 'healthz',
+                scheme: 'HTTP',
+              },
+              periodSeconds: 10,
+              successThreshold: 1,
+              timeoutSeconds: 1,
+            },
+            name: 'machineset-controller',
+            ports: [
+              {
+                containerPort: 8443,
+                name: 'webhook-server',
+                protocol: 'TCP',
+              },
+              {
+                containerPort: 9441,
+                name: 'healthz',
+                protocol: 'TCP',
+              },
+            ],
+            readinessProbe: {
+              failureThreshold: 3,
+              httpGet: {
+                path: '/healthz',
+                port: 'healthz',
+                scheme: 'HTTP',
+              },
+              periodSeconds: 10,
+              successThreshold: 1,
+              timeoutSeconds: 1,
+            },
+            resources: {
+              requests: {
+                cpu: '10m',
+                memory: '20Mi',
+              },
+            },
+            terminationMessagePath: '/dev/termination-log',
+            terminationMessagePolicy: 'File',
+            volumeMounts: [
+              {
+                mountPath: '/etc/machine-api-operator/tls',
+                name: 'machineset-webhook-cert',
+                readOnly: true,
+              },
+            ],
+          },
+          {
+            args: [
+              '--logtostderr=true',
+              '--v=3',
+              '--leader-elect=true',
+              '--leader-elect-lease-duration=120s',
+              '--namespace=openshift-machine-api',
+            ],
+            command: [
+              '/nodelink-controller',
+            ],
+            image: controllerImage,
+            imagePullPolicy: 'IfNotPresent',
+            name: 'nodelink-controller',
+            resources: {
+              requests: {
+                cpu: '10m',
+                memory: '20Mi',
+              },
+            },
+            terminationMessagePath: '/dev/termination-log',
+            terminationMessagePolicy: 'File',
+          },
+          {
+            args: [
+              '--logtostderr=true',
+              '--v=3',
+              '--leader-elect=true',
+              '--leader-elect-lease-duration=120s',
+              '--namespace=openshift-machine-api',
+            ],
+            command: [
+              '/machine-healthcheck',
+            ],
+            image: controllerImage,
+            imagePullPolicy: 'IfNotPresent',
+            livenessProbe: {
+              failureThreshold: 3,
+              httpGet: {
+                path: '/readyz',
+                port: 'healthz',
+                scheme: 'HTTP',
+              },
+              periodSeconds: 10,
+              successThreshold: 1,
+              timeoutSeconds: 1,
+            },
+            name: 'machine-healthcheck-controller',
+            ports: [
+              {
+                containerPort: 9442,
+                name: 'healthz',
+                protocol: 'TCP',
+              },
+            ],
+            readinessProbe: {
+              failureThreshold: 3,
+              httpGet: {
+                path: '/healthz',
+                port: 'healthz',
+                scheme: 'HTTP',
+              },
+              periodSeconds: 10,
+              successThreshold: 1,
+              timeoutSeconds: 1,
+            },
+            resources: {
+              requests: {
+                cpu: '10m',
+                memory: '20Mi',
+              },
+            },
+            terminationMessagePath: '/dev/termination-log',
+            terminationMessagePolicy: 'File',
+          },
+          kubeProxyContainer('8082', 'machineset-mtrc', 8442),
+          kubeProxyContainer('8081', 'machine-mtrc', 8441),
+          kubeProxyContainer('8083', 'mhc-mtrc', 8444),
+        ],
+        dnsPolicy: 'ClusterFirst',
+        nodeSelector: {
+          'node-role.kubernetes.io/master': '',
+        },
+        priorityClassName: 'system-node-critical',
+        restartPolicy: 'Always',
+        schedulerName: 'default-scheduler',
+        securityContext: {},
+        serviceAccount: 'machine-api-controllers',
+        serviceAccountName: 'machine-api-controllers',
+        terminationGracePeriodSeconds: 30,
+        tolerations: [
+          {
+            effect: 'NoSchedule',
+            key: 'node-role.kubernetes.io/master',
+          },
+          {
+            key: 'CriticalAddonsOnly',
+            operator: 'Exists',
+          },
+          {
+            effect: 'NoExecute',
+            key: 'node.kubernetes.io/not-ready',
+            operator: 'Exists',
+            tolerationSeconds: 120,
+          },
+          {
+            effect: 'NoExecute',
+            key: 'node.kubernetes.io/unreachable',
+            operator: 'Exists',
+            tolerationSeconds: 120,
+          },
+        ],
+        volumes: [
+          {
+            name: 'machineset-webhook-cert',
+            secret: {
+              defaultMode: 420,
+              items: [
+                {
+                  key: 'tls.crt',
+                  path: 'tls.crt',
+                },
+                {
+                  key: 'tls.key',
+                  path: 'tls.key',
+                },
+              ],
+              secretName: 'machine-api-operator-webhook-cert',
+            },
+          },
+          {
+            configMap: {
+              defaultMode: 420,
+              name: 'kube-rbac-proxy',
+            },
+            name: 'config',
+          },
+          {
+            name: 'machine-api-controllers-tls',
+            secret: {
+              defaultMode: 420,
+              secretName: 'machine-api-controllers-tls',
+            },
+          },
+        ],
+      },
+    },
+  },
+};
+
+controllersDeployment
diff --git a/controllers/machineset_controller_test.go b/controllers/machineset_controller_test.go
index 6633a2b..3b3d42d 100644
--- a/controllers/machineset_controller_test.go
+++ b/controllers/machineset_controller_test.go
@@ -17,6 +17,8 @@ import (
 )
 
 func Test_MachineSetReconciler_Reconcile(t *testing.T) {
+	t.Parallel()
+
 	ctx := context.Background()
 
 	scheme := runtime.NewScheme()
diff --git a/main.go b/main.go
index b3df9b3..9ee902f 100644
--- a/main.go
+++ b/main.go
@@ -35,10 +35,12 @@ import (
 	"k8s.io/apiserver/pkg/util/feature"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
+	"k8s.io/client-go/rest"
 	"k8s.io/component-base/featuregate"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
@@ -60,7 +62,7 @@ func init() {
 
 func main() {
 	var target string
-	flag.StringVar(&target, "target", "manager", "The target mode of this binary. Valid values are 'manager' and 'termination-handler'.")
+	flag.StringVar(&target, "target", "manager", "The target mode of this binary. Valid values are 'manager', 'machine-api-controllers-manager', and 'termination-handler'.")
 
 	var metricsAddr string
 	var enableLeaderElection bool
@@ -96,6 +98,8 @@ func main() {
 		runManager(metricsAddr, probeAddr, watchNamespace, enableLeaderElection, featureGate)
 	case "termination-handler":
 		runTerminationHandler()
+	case "machine-api-controllers-manager":
+		runMachineAPIControllersManager(metricsAddr, probeAddr, watchNamespace, enableLeaderElection)
 	default:
 		setupLog.Error(nil, "invalid target", "target", target)
 		os.Exit(1)
@@ -143,6 +147,15 @@ func runManager(metricsAddr, probeAddr, watchNamespace string, enableLeaderElect
 		os.Exit(1)
 	}
 
+	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to set up health check")
+		os.Exit(1)
+	}
+	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to set up ready check")
+		os.Exit(1)
+	}
+
 	versionString := "unknown"
 	if v, ok := debug.ReadBuildInfo(); ok {
 		versionString = fmt.Sprintf("%s (%s)", v.Main.Version, v.GoVersion)
@@ -191,3 +204,59 @@ func runManager(metricsAddr, probeAddr, watchNamespace string, enableLeaderElect
 func runTerminationHandler() {
 	panic("not implemented")
 }
+
+func runMachineAPIControllersManager(metricsAddr, probeAddr, watchNamespace string, enableLeaderElection bool) {
+	if watchNamespace == "" {
+		setupLog.Error(nil, "namespace must be set for the machine-api-controllers manager")
+		os.Exit(1)
+	}
+
+	opts := ctrl.Options{
+		Scheme: scheme,
+		Metrics: server.Options{
+			BindAddress: metricsAddr,
+		},
+		HealthProbeBindAddress:        probeAddr,
+		LeaderElection:                enableLeaderElection,
+		LeaderElectionID:              "458f6dca.appuio.io",
+		LeaderElectionReleaseOnCancel: true,
+
+		// Limit the manager to only watch the namespace the controller is running in.
+		NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
+			opts.DefaultNamespaces = map[string]cache.Config{
+				watchNamespace: {},
+			}
+			return cache.New(config, opts)
+		},
+	}
+
+	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opts)
+	if err != nil {
+		setupLog.Error(err, "unable to start manager")
+		os.Exit(1)
+	}
+
+	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to set up health check")
+		os.Exit(1)
+	}
+	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to set up ready check")
+		os.Exit(1)
+	}
+
+	if err := (&controllers.MachineAPIControllersReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+
+		Namespace: watchNamespace,
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "UpstreamDeployment")
+		os.Exit(1)
+	}
+
+	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		setupLog.Error(err, "problem running manager")
+		os.Exit(1)
+	}
+}