diff --git a/pkg/binding/binding.go b/pkg/binding/binding.go index ffdd0a460bd..0cb8dbf2505 100644 --- a/pkg/binding/binding.go +++ b/pkg/binding/binding.go @@ -12,9 +12,11 @@ import ( "github.com/devfile/library/pkg/devfile/parser" devfilefs "github.com/devfile/library/pkg/testingutil/filesystem" "gopkg.in/yaml.v2" + appsv1 "k8s.io/api/apps/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" devfilev1alpha2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" parsercommon "github.com/devfile/library/pkg/devfile/parser/data/v2/common" @@ -296,3 +298,56 @@ func (o *BindingClient) checkServiceBindingOperatorInstalled() error { } return nil } + +func (o *BindingClient) CheckServiceBindingsInjectionDone(componentName string, appName string) (bool, error) { + + deployment, err := o.kubernetesClient.GetOneDeployment(componentName, appName) + if err != nil { + // If there is no deployment yet => all bindings are done + if _, ok := err.(*kclient.DeploymentNotFoundError); ok { + return true, nil + } + return false, err + } + deploymentName := deployment.GetName() + + specList, bindingList, err := o.kubernetesClient.ListServiceBindingsFromAllGroups() + if err != nil { + // If the ServiceBinding kind is not registered => all bindings are done + if runtime.IsNotRegisteredError(err) { + return true, nil + } + return false, err + } + + for _, binding := range bindingList { + app := binding.Spec.Application + if app.Group != appsv1.SchemeGroupVersion.Group || + app.Version != appsv1.SchemeGroupVersion.Version || + (app.Kind != "Deployment" && app.Resource != "deployments") { + continue + } + if app.Name != deploymentName { + continue + } + if injected := meta.IsStatusConditionTrue(binding.Status.Conditions, bindingApis.InjectionReady); !injected { + return false, nil + } + } + + for _, binding := range specList { + app := binding.Spec.Workload + if app.APIVersion != appsv1.SchemeGroupVersion.String() || + app.Kind != "Deployment" { + continue + } + if app.Name != deploymentName { + continue + } + if injected := meta.IsStatusConditionTrue(binding.Status.Conditions, bindingApis.InjectionReady); !injected { + return false, nil + } + } + + return true, nil +} diff --git a/pkg/binding/interface.go b/pkg/binding/interface.go index 3b59807aa24..f2dcace6378 100644 --- a/pkg/binding/interface.go +++ b/pkg/binding/interface.go @@ -72,4 +72,7 @@ type Client interface { ValidateRemoveBinding(flags map[string]string) error // RemoveBinding removes the binding from devfile RemoveBinding(bindingName string, obj parser.DevfileObj) (parser.DevfileObj, error) + + // CheckServiceBindingsInjectionDone checks that all service bindings pointing to the component have the InjectionReady condition + CheckServiceBindingsInjectionDone(componentName string, appName string) (bool, error) } diff --git a/pkg/binding/mock.go b/pkg/binding/mock.go index 89373b8fffd..873ec147dee 100644 --- a/pkg/binding/mock.go +++ b/pkg/binding/mock.go @@ -115,6 +115,21 @@ func (mr *MockClientMockRecorder) AskNamingStrategy(flags interface{}) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AskNamingStrategy", reflect.TypeOf((*MockClient)(nil).AskNamingStrategy), flags) } +// CheckServiceBindingsInjectionDone mocks base method.
+func (m *MockClient) CheckServiceBindingsInjectionDone(componentName, appName string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckServiceBindingsInjectionDone", componentName, appName) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CheckServiceBindingsInjectionDone indicates an expected call of CheckServiceBindingsInjectionDone. +func (mr *MockClientMockRecorder) CheckServiceBindingsInjectionDone(componentName, appName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckServiceBindingsInjectionDone", reflect.TypeOf((*MockClient)(nil).CheckServiceBindingsInjectionDone), componentName, appName) +} + // GetBindingFromCluster mocks base method. func (m *MockClient) GetBindingFromCluster(name string) (api.ServiceBinding, error) { m.ctrl.T.Helper() diff --git a/pkg/component/component.go b/pkg/component/component.go index 69d046e7d72..fb0425d835e 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -88,7 +88,7 @@ func Exists(client kclient.ClientInterface, componentName, applicationName strin // GetOnePod gets a pod using the component and app name func GetOnePod(client kclient.ClientInterface, componentName string, appName string) (*corev1.Pod, error) { - return client.GetOnePodFromSelector(odolabels.GetSelector(componentName, appName, odolabels.ComponentDevMode)) + return client.GetRunningPodFromSelector(odolabels.GetSelector(componentName, appName, odolabels.ComponentDevMode)) } // Log returns log from component diff --git a/pkg/component/delete/delete.go b/pkg/component/delete/delete.go index c29ab1acfad..23881fec43f 100644 --- a/pkg/component/delete/delete.go +++ b/pkg/component/delete/delete.go @@ -156,7 +156,7 @@ func (do *DeleteComponentClient) ExecutePreStopEvents(devfileObj parser.DevfileO klog.V(3).Infof("Checking component status for %q", componentName) selector := odolabels.GetSelector(componentName, appName, odolabels.ComponentDevMode) - pod, err := do.kubeClient.GetOnePodFromSelector(selector) + pod, err := do.kubeClient.GetRunningPodFromSelector(selector) if err != nil { klog.V(1).Info("Component not found on the cluster.") diff --git a/pkg/component/delete/delete_test.go b/pkg/component/delete/delete_test.go index c6010c425d6..4b0a299e13a 100644 --- a/pkg/component/delete/delete_test.go +++ b/pkg/component/delete/delete_test.go @@ -524,7 +524,7 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) { client := kclient.NewMockClientInterface(ctrl) selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode) - client.EXPECT().GetOnePodFromSelector(selector).Return(&corev1.Pod{}, &kclient.PodNotFoundError{Selector: selector}) + client.EXPECT().GetRunningPodFromSelector(selector).Return(&corev1.Pod{}, &kclient.PodNotFoundError{Selector: selector}) return client }, }, @@ -541,7 +541,7 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) { client := kclient.NewMockClientInterface(ctrl) selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode) - client.EXPECT().GetOnePodFromSelector(selector).Return(nil, errors.New("some un-ignorable error")) + client.EXPECT().GetRunningPodFromSelector(selector).Return(nil, errors.New("some un-ignorable error")) return client }, }, @@ -558,7 +558,7 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) { client := kclient.NewMockClientInterface(ctrl) selector := odolabels.GetSelector(componentName, "app", 
odolabels.ComponentDevMode) - client.EXPECT().GetOnePodFromSelector(selector).Return(odoTestingUtil.CreateFakePod(componentName, "runtime"), nil) + client.EXPECT().GetRunningPodFromSelector(selector).Return(odoTestingUtil.CreateFakePod(componentName, "runtime"), nil) cmd := []string{"/bin/sh", "-c", "cd /projects/nodejs-starter && (echo \"Hello World!\") 1>>/proc/1/fd/1 2>>/proc/1/fd/2"} client.EXPECT().ExecCMDInContainer("runtime", "runtime", cmd, gomock.Any(), gomock.Any(), nil, false).Return(nil) @@ -581,7 +581,7 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) { selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode) pod := odoTestingUtil.CreateFakePod(componentName, "runtime") pod.Status.Phase = corev1.PodFailed - client.EXPECT().GetOnePodFromSelector(selector).Return(pod, nil) + client.EXPECT().GetRunningPodFromSelector(selector).Return(pod, nil) return client }, }, @@ -600,7 +600,7 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) { selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode) fakePod := odoTestingUtil.CreateFakePod(componentName, "runtime") //Expecting this method to be called twice because if the command execution fails, we try to get the pod logs by calling GetOnePodFromSelector again. - client.EXPECT().GetOnePodFromSelector(selector).Return(fakePod, nil).Times(2) + client.EXPECT().GetRunningPodFromSelector(selector).Return(fakePod, nil).Times(2) client.EXPECT().GetPodLogs(fakePod.Name, gomock.Any(), gomock.Any()).Return(nil, errors.New("an error")) diff --git a/pkg/dev/dev.go b/pkg/dev/dev.go index 4bd42bb5ae9..6a1652d69dd 100644 --- a/pkg/dev/dev.go +++ b/pkg/dev/dev.go @@ -4,6 +4,7 @@ import ( "context" "io" + "github.com/redhat-developer/odo/pkg/binding" "github.com/redhat-developer/odo/pkg/envinfo" "github.com/redhat-developer/odo/pkg/kclient" "github.com/redhat-developer/odo/pkg/portForward" @@ -22,6 +23,7 @@ type DevClient struct { prefClient preference.Client portForwardClient portForward.Client watchClient watch.Client + bindingClient binding.Client } var _ Client = (*DevClient)(nil) @@ -31,12 +33,14 @@ func NewDevClient( prefClient preference.Client, portForwardClient portForward.Client, watchClient watch.Client, + bindingClient binding.Client, ) *DevClient { return &DevClient{ kubernetesClient: kubernetesClient, prefClient: prefClient, portForwardClient: portForwardClient, watchClient: watchClient, + bindingClient: bindingClient, } } @@ -50,10 +54,10 @@ func (o *DevClient) Start( runCommand string, randomPorts bool, errOut io.Writer, -) error { +) (watch.ComponentStatus, error) { klog.V(4).Infoln("Creating new adapter") adapter := component.NewKubernetesAdapter( - o.kubernetesClient, o.prefClient, o.portForwardClient, + o.kubernetesClient, o.prefClient, o.portForwardClient, o.bindingClient, component.AdapterContext{ ComponentName: devfileObj.GetMetadataName(), Context: path, @@ -64,7 +68,7 @@ func (o *DevClient) Start( envSpecificInfo, err := envinfo.NewEnvSpecificInfo(path) if err != nil { - return err + return watch.ComponentStatus{}, err } pushParameters := adapters.PushParameters{ @@ -80,15 +84,17 @@ func (o *DevClient) Start( } klog.V(4).Infoln("Creating inner-loop resources for the component") - err = adapter.Push(pushParameters) + componentStatus := watch.ComponentStatus{} + err = adapter.Push(pushParameters, &componentStatus) if err != nil { - return err + return watch.ComponentStatus{}, err } klog.V(4).Infoln("Successfully created inner-loop resources") - 
return nil + return componentStatus, nil } func (o *DevClient) Watch( + devfilePath string, devfileObj parser.DevfileObj, path string, ignorePaths []string, @@ -100,7 +106,9 @@ func (o *DevClient) Watch( runCommand string, variables map[string]string, randomPorts bool, + watchFiles bool, errOut io.Writer, + componentStatus watch.ComponentStatus, ) error { envSpecificInfo, err := envinfo.NewEnvSpecificInfo(path) if err != nil { @@ -108,6 +116,7 @@ func (o *DevClient) Watch( } watchParameters := watch.WatchParameters{ + DevfilePath: devfilePath, Path: path, ComponentName: devfileObj.GetMetadataName(), ApplicationName: "app", @@ -121,8 +130,9 @@ func (o *DevClient) Watch( DebugPort: envSpecificInfo.GetDebugPort(), Variables: variables, RandomPorts: randomPorts, + WatchFiles: watchFiles, ErrOut: errOut, } - return o.watchClient.WatchAndPush(out, watchParameters, ctx) + return o.watchClient.WatchAndPush(out, watchParameters, ctx, componentStatus) } diff --git a/pkg/dev/interface.go b/pkg/dev/interface.go index 8a8d04f5f89..8fe302c2f83 100644 --- a/pkg/dev/interface.go +++ b/pkg/dev/interface.go @@ -15,6 +15,7 @@ type Client interface { // If debug is true, executes the debug command, or the run command by default. // If buildCommand is set, this will look up the specified build command in the Devfile. Otherwise, it uses the default one. // If runCommand is set, this will look up the specified run command in the Devfile and execute it. Otherwise, it uses the default one. + // Returns the status of the started component Start( devfileObj parser.DevfileObj, namespace string, @@ -25,7 +26,7 @@ type Client interface { runCommand string, randomPorts bool, errOut io.Writer, - ) error + ) (watch.ComponentStatus, error) // Watch watches for any changes to the files under path while ignoring the files/directories in ignorePaths. // It logs messages to out and uses the Handler h to perform push operation when anything changes in path. @@ -33,7 +34,9 @@ type Client interface { // If debug is true, the debug command will be started after a sync, or the run command by default. // If buildCommand is set, this will look up the specified build command in the Devfile. Otherwise, it uses the default one. // If runCommand is set, this will look up the specified run command in the Devfile and execute it. Otherwise, it uses the default one. + // componentStatus is the status returned from the call to the Start Method Watch( + devfilePath string, devfileObj parser.DevfileObj, path string, ignorePaths []string, @@ -45,10 +48,12 @@ type Client interface { runCommand string, variables map[string]string, randomPorts bool, + watchFiles bool, errOut io.Writer, + componentStatus watch.ComponentStatus, ) error } type Handler interface { - RegenerateAdapterAndPush(adapters.PushParameters, watch.WatchParameters) error + RegenerateAdapterAndPush(adapters.PushParameters, watch.WatchParameters, *watch.ComponentStatus) error } diff --git a/pkg/dev/mock.go b/pkg/dev/mock.go index 6dfd86a243e..24871fa43a0 100644 --- a/pkg/dev/mock.go +++ b/pkg/dev/mock.go @@ -39,11 +39,12 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder { } // Start mocks base method. 
-func (m *MockClient) Start(devfileObj parser.DevfileObj, namespace string, ignorePaths []string, path string, debug bool, buildCommand, runCommand string, randomPorts bool, errOut io.Writer) error { +func (m *MockClient) Start(devfileObj parser.DevfileObj, namespace string, ignorePaths []string, path string, debug bool, buildCommand, runCommand string, randomPorts bool, errOut io.Writer) (watch.ComponentStatus, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start", devfileObj, namespace, ignorePaths, path, debug, buildCommand, runCommand, randomPorts, errOut) - ret0, _ := ret[0].(error) - return ret0 + ret0, _ := ret[0].(watch.ComponentStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 } // Start indicates an expected call of Start. @@ -53,17 +54,17 @@ func (mr *MockClientMockRecorder) Start(devfileObj, namespace, ignorePaths, path } // Watch mocks base method. -func (m *MockClient) Watch(devfileObj parser.DevfileObj, path string, ignorePaths []string, out io.Writer, h Handler, ctx context.Context, debug bool, buildCommand, runCommand string, variables map[string]string, randomPorts bool, errOut io.Writer) error { +func (m *MockClient) Watch(devfilePath string, devfileObj parser.DevfileObj, path string, ignorePaths []string, out io.Writer, h Handler, ctx context.Context, debug bool, buildCommand, runCommand string, variables map[string]string, randomPorts, watchFiles bool, errOut io.Writer, componentStatus watch.ComponentStatus) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Watch", devfileObj, path, ignorePaths, out, h, ctx, debug, buildCommand, runCommand, variables, randomPorts, errOut) + ret := m.ctrl.Call(m, "Watch", devfilePath, devfileObj, path, ignorePaths, out, h, ctx, debug, buildCommand, runCommand, variables, randomPorts, watchFiles, errOut, componentStatus) ret0, _ := ret[0].(error) return ret0 } // Watch indicates an expected call of Watch. -func (mr *MockClientMockRecorder) Watch(devfileObj, path, ignorePaths, out, h, ctx, debug, buildCommand, runCommand, variables, randomPorts, errOut interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) Watch(devfilePath, devfileObj, path, ignorePaths, out, h, ctx, debug, buildCommand, runCommand, variables, randomPorts, watchFiles, errOut, componentStatus interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockClient)(nil).Watch), devfileObj, path, ignorePaths, out, h, ctx, debug, buildCommand, runCommand, variables, randomPorts, errOut) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockClient)(nil).Watch), devfilePath, devfileObj, path, ignorePaths, out, h, ctx, debug, buildCommand, runCommand, variables, randomPorts, watchFiles, errOut, componentStatus) } // MockHandler is a mock of Handler interface. @@ -90,15 +91,15 @@ func (m *MockHandler) EXPECT() *MockHandlerMockRecorder { } // RegenerateAdapterAndPush mocks base method. -func (m *MockHandler) RegenerateAdapterAndPush(arg0 adapters.PushParameters, arg1 watch.WatchParameters) error { +func (m *MockHandler) RegenerateAdapterAndPush(arg0 adapters.PushParameters, arg1 watch.WatchParameters, arg2 *watch.ComponentStatus) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegenerateAdapterAndPush", arg0, arg1) + ret := m.ctrl.Call(m, "RegenerateAdapterAndPush", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // RegenerateAdapterAndPush indicates an expected call of RegenerateAdapterAndPush. 
-func (mr *MockHandlerMockRecorder) RegenerateAdapterAndPush(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) RegenerateAdapterAndPush(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegenerateAdapterAndPush", reflect.TypeOf((*MockHandler)(nil).RegenerateAdapterAndPush), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegenerateAdapterAndPush", reflect.TypeOf((*MockHandler)(nil).RegenerateAdapterAndPush), arg0, arg1, arg2) } diff --git a/pkg/devfile/adapters/errors.go b/pkg/devfile/adapters/errors.go new file mode 100644 index 00000000000..d0bde259ebb --- /dev/null +++ b/pkg/devfile/adapters/errors.go @@ -0,0 +1,15 @@ +package adapters + +import "fmt" + +type ErrPortForward struct { + cause error +} + +func NewErrPortForward(cause error) ErrPortForward { + return ErrPortForward{cause: cause} +} + +func (e ErrPortForward) Error() string { + return fmt.Sprintf("fail starting the port forwarding: %s", e.cause) +} diff --git a/pkg/devfile/adapters/kubernetes/component/adapter.go b/pkg/devfile/adapters/kubernetes/component/adapter.go index b641b93f87c..14e8b0428d3 100644 --- a/pkg/devfile/adapters/kubernetes/component/adapter.go +++ b/pkg/devfile/adapters/kubernetes/component/adapter.go @@ -1,15 +1,17 @@ package component import ( + "errors" "fmt" "io" "path/filepath" + "reflect" "strings" "k8s.io/utils/pointer" + "github.com/redhat-developer/odo/pkg/binding" "github.com/redhat-developer/odo/pkg/component" - "github.com/redhat-developer/odo/pkg/devfile" "github.com/redhat-developer/odo/pkg/devfile/adapters" "github.com/redhat-developer/odo/pkg/devfile/adapters/kubernetes/storage" "github.com/redhat-developer/odo/pkg/devfile/adapters/kubernetes/utils" @@ -25,6 +27,7 @@ import ( storagepkg "github.com/redhat-developer/odo/pkg/storage" "github.com/redhat-developer/odo/pkg/sync" "github.com/redhat-developer/odo/pkg/util" + "github.com/redhat-developer/odo/pkg/watch" devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" "github.com/devfile/library/pkg/devfile/generator" @@ -43,6 +46,7 @@ type Adapter struct { kubeClient kclient.ClientInterface prefClient preference.Client portForwardClient portForward.Client + bindingClient binding.Client AdapterContext logger machineoutput.MachineEventLoggingClient @@ -64,6 +68,7 @@ func NewKubernetesAdapter( kubernetesClient kclient.ClientInterface, prefClient preference.Client, portForwardClient portForward.Client, + bindingClient binding.Client, context AdapterContext, namespace string, ) Adapter { @@ -76,77 +81,18 @@ func NewKubernetesAdapter( kubeClient: kubernetesClient, prefClient: prefClient, portForwardClient: portForwardClient, + bindingClient: bindingClient, AdapterContext: context, logger: machineoutput.NewMachineEventLoggingClient(), } } -// getPod lazily records and retrieves the pod associated with the component associated with this adapter. If refresh parameter -// is true, then the pod is refreshed from the cluster regardless of its current local state -func (a *Adapter) getPod(pod *corev1.Pod, refresh bool) (*corev1.Pod, error) { - result := pod - if refresh || result == nil { - podSelector := fmt.Sprintf("component=%s", a.ComponentName) - - // Wait for Pod to be in running state otherwise we can't sync data to it. 
- var err error - result, err = a.kubeClient.WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, a.prefClient.GetPushTimeout()) - if err != nil { - return nil, fmt.Errorf("error while waiting for pod %s: %w", podSelector, err) - } - } - return result, nil -} - -func (a *Adapter) ComponentInfo(pod *corev1.Pod, command devfilev1.Command) (adapters.ComponentInfo, error) { - pod, err := a.getPod(pod, false) - if err != nil { - return adapters.ComponentInfo{}, err - } - return adapters.ComponentInfo{ - PodName: pod.Name, - ContainerName: command.Exec.Component, - }, nil -} - // Push updates the component if a matching component exists or creates one if it doesn't exist // Once the component has started, it will sync the source code to it. -func (a Adapter) Push(parameters adapters.PushParameters) (err error) { - - // Get the Dev deployment: - // Since `odo deploy` can theoretically deploy a deployment as well with the same instance name - // we make sure that we are retrieving the deployment with the Dev mode, NOT Deploy. - selectorLabels := odolabels.GetSelector(a.ComponentName, a.AppName, odolabels.ComponentDevMode) - deployment, err := a.kubeClient.GetOneDeploymentFromSelector(selectorLabels) - - if err != nil { - if _, ok := err.(*kclient.DeploymentNotFoundError); !ok { - return fmt.Errorf("unable to determine if component %s exists: %w", a.ComponentName, err) - } - } - componentExists := deployment != nil - - podChanged := false - var podName string - - // If the component already exists, retrieve the pod's name before it's potentially updated - if componentExists { - // First see if the component does have a pod. it could have been scaled down to zero - _, err = a.kubeClient.GetOnePodFromSelector(fmt.Sprintf("component=%s", a.ComponentName)) - // If an error occurs, we don't call a.getPod (a blocking function that waits till it finds a pod in "Running" state.) - // We would rely on a call to a.createOrUpdateComponent to reset the pod count for the component to one. 
- if err == nil { - pod, podErr := a.getPod(nil, true) - if podErr != nil { - return fmt.Errorf("unable to get pod for component %s: %w", a.ComponentName, podErr) - } - podName = pod.GetName() - } - } - - s := log.Spinner("Waiting for Kubernetes resources") - defer s.End(false) +// The componentStatus will be modified to reflect the status of the component when the function returns +func (a Adapter) Push(parameters adapters.PushParameters, componentStatus *watch.ComponentStatus) (err error) { + // preliminary checks err = dfutil.ValidateK8sResourceName("component name", a.ComponentName) if err != nil { return err @@ -157,55 +103,25 @@ func (a Adapter) Push(parameters adapters.PushParameters) (err error) { return err } - pushDevfileCommands, err := libdevfile.ValidateAndGetPushCommands(a.Devfile, parameters.DevfileBuildCmd, parameters.DevfileRunCmd) + deployment, componentExists, err := a.getComponentDeployment() if err != nil { - return fmt.Errorf("failed to validate devfile build and run commands: %w", err) - } - - // Set the mode to Dev since we are using "odo dev" here - labels := odolabels.GetLabels(a.ComponentName, a.AppName, odolabels.ComponentDevMode) - - // Set the annotations for the component type - annotations := make(map[string]string) - odolabels.SetProjectType(annotations, component.GetComponentTypeFromDevfileMetadata(a.AdapterContext.Devfile.Data.GetMetadata())) - - previousMode := parameters.EnvSpecificInfo.GetRunMode() - currentMode := envinfo.Run - - if parameters.Debug { - pushDevfileDebugCommands, e := libdevfile.ValidateAndGetCommand(a.Devfile, parameters.DevfileDebugCmd, devfilev1.DebugCommandGroupKind) - if e != nil { - return fmt.Errorf("debug command is not valid: %w", e) - } - pushDevfileCommands[devfilev1.DebugCommandGroupKind] = pushDevfileDebugCommands - currentMode = envinfo.Debug + return err } - if currentMode != previousMode { - parameters.RunModeChanged = true + if componentStatus.State != watch.StateWaitDeployment && componentStatus.State != watch.StateReady { + log.SpinnerNoSpin("Waiting for Kubernetes resources") } - // fetch the "kubernetes inlined components" to create them on cluster - // from odo standpoint, these components contain yaml manifest of an odo service or an odo link - k8sComponents, err := devfile.GetKubernetesComponentsToPush(a.Devfile) - if err != nil { - return fmt.Errorf("error while trying to fetch service(s) from devfile: %w", err) - } + // Set the mode to Dev since we are using "odo dev" here + labels := odolabels.GetLabels(a.ComponentName, a.AppName, odolabels.ComponentDevMode) - // validate if the GVRs represented by Kubernetes inlined components are supported by the underlying cluster - err = service.ValidateResourcesExist(a.kubeClient, a.Devfile, k8sComponents, a.Context) + k8sComponents, err := a.pushDevfileKubernetesComponents(labels) if err != nil { return err } - // create the Kubernetes objects from the manifest and delete the ones not in the devfile - err = service.PushKubernetesResources(a.kubeClient, a.Devfile, k8sComponents, labels, annotations, a.Context) - if err != nil { - return fmt.Errorf("failed to create service(s) associated with the component: %w", err) - } - - isMainStorageEphemeral := a.prefClient.GetEphemeralSourceVolume() - deployment, err = a.createOrUpdateComponent(componentExists, parameters.EnvSpecificInfo, isMainStorageEphemeral, libdevfile.DevfileCommands{ + var updated bool + deployment, updated, err = a.createOrUpdateComponent(componentExists, parameters.EnvSpecificInfo, 
libdevfile.DevfileCommands{ BuildCmd: parameters.DevfileBuildCmd, RunCmd: parameters.DevfileRunCmd, DebugCmd: parameters.DevfileDebugCmd, @@ -214,37 +130,12 @@ func (a Adapter) Push(parameters adapters.PushParameters) (err error) { return fmt.Errorf("unable to create or update component: %w", err) } - deployment, err = a.kubeClient.WaitForDeploymentRollout(deployment.Name) - if err != nil { - return fmt.Errorf("error while waiting for deployment rollout: %w", err) - } - - // Wait for Pod to be in running state otherwise we can't sync data or exec commands to it. - pod, err := a.getPod(nil, true) - if err != nil { - return fmt.Errorf("unable to get pod for component %s: %w", a.ComponentName, err) - } - - // list the latest state of the PVCs - pvcs, err := a.kubeClient.ListPVCs(fmt.Sprintf("%v=%v", "component", a.ComponentName)) + ownerReference := generator.GetOwnerReference(deployment) + err = a.updatePVCsOwnerReferences(ownerReference) if err != nil { return err } - ownerReference := generator.GetOwnerReference(deployment) - // update the owner reference of the PVCs with the deployment - for i := range pvcs { - if pvcs[i].OwnerReferences != nil || pvcs[i].DeletionTimestamp != nil { - continue - } - err = a.kubeClient.TryWithBlockOwnerDeletion(ownerReference, func(ownerRef metav1.OwnerReference) error { - return a.kubeClient.UpdateStorageOwnerReference(&pvcs[i], ownerRef) - }) - if err != nil { - return err - } - } - // Update all services with owner references err = a.kubeClient.TryWithBlockOwnerDeletion(ownerReference, func(ownerRef metav1.OwnerReference) error { return service.UpdateServicesWithOwnerReferences(a.kubeClient, a.Devfile, k8sComponents, ownerRef, a.Context) @@ -254,45 +145,71 @@ func (a Adapter) Push(parameters adapters.PushParameters) (err error) { } // create the Kubernetes objects from the manifest and delete the ones not in the devfile - needRestart, err := service.PushLinks(a.kubeClient, a.Devfile, k8sComponents, labels, deployment, a.Context) + err = service.PushLinks(a.kubeClient, a.Devfile, k8sComponents, labels, deployment, a.Context) if err != nil { - return fmt.Errorf("failed to create service(s) associated with the component: %w", err) + return fmt.Errorf("failed to create service bindings associated with the component: %w", err) } - if needRestart { - err = a.kubeClient.WaitForPodDeletion(pod.Name) - if err != nil { - return err - } + if updated { + klog.V(4).Infof("Deployment has been updated to generation %d. Waiting for a new event...\n", deployment.GetGeneration()) + componentStatus.State = watch.StateWaitDeployment + return nil } - _, err = a.kubeClient.WaitForDeploymentRollout(deployment.Name) + numberReplicas := deployment.Status.ReadyReplicas + if numberReplicas != 1 { + klog.V(4).Infof("Deployment has %d ready replicas. Waiting for a new event...\n", numberReplicas) + componentStatus.State = watch.StateWaitDeployment + return nil + } + + injected, err := a.bindingClient.CheckServiceBindingsInjectionDone(a.ComponentName, a.AppName) if err != nil { - return fmt.Errorf("failed to update config to component deployed: %w", err) + return err } - // Wait for Pod to be in running state otherwise we can't sync data or exec commands to it.
- pod, err = a.getPod(pod, true) + if !injected { + klog.V(4).Infof("Waiting for all service bindings to be injected...\n") + return errors.New("some service bindings are not injected") + } + + // Check if endpoints changed in Devfile + portsToForward, err := a.portForwardClient.GetPortsToForward(a.Devfile) if err != nil { - return fmt.Errorf("unable to get pod for component %s: %w", a.ComponentName, err) + return err } + portsChanged := !reflect.DeepEqual(portsToForward, a.portForwardClient.GetForwardedPorts()) - parameters.EnvSpecificInfo.SetDevfileObj(a.Devfile) + if componentStatus.State == watch.StateReady && !portsChanged { + // If the deployment is already in Ready state, no need to continue + return nil + } - // Compare the name of the pod with the one before the rollout. If they differ, it means there's a new pod and a force push is required - if componentExists && podName != pod.GetName() { - podChanged = true + // Now that the Deployment has a Ready replica, we can get the Pod to work inside it + pod, err := a.kubeClient.GetPodUsingComponentName(a.ComponentName) + if err != nil { + return fmt.Errorf("unable to get pod for component %s: %w", a.ComponentName, err) } + parameters.EnvSpecificInfo.SetDevfileObj(a.Devfile) // Find at least one pod with the source volume mounted, error out if none can be found containerName, syncFolder, err := getFirstContainerWithSourceVolume(pod.Spec.Containers) if err != nil { - return fmt.Errorf("error while retrieving container from pod %s with a mounted project volume: %w", podName, err) + return fmt.Errorf("error while retrieving container from pod %s with a mounted project volume: %w", pod.GetName(), err) } - s.End(true) - s = log.Spinner("Syncing files into the container") + s := log.Spinner("Syncing files into the container") defer s.End(false) + + // Get commands + pushDevfileCommands, err := a.getPushDevfileCommands(parameters) + if err != nil { + return fmt.Errorf("failed to validate devfile build and run commands: %w", err) + } + + podChanged := componentStatus.State == watch.StateWaitDeployment + // Get a sync adapter.
Check if project files have changed and sync accordingly syncAdapter := sync.New(&a, a.kubeClient, a.ComponentName) compInfo := adapters.ComponentInfo{ @@ -310,19 +227,21 @@ func (a Adapter) Push(parameters adapters.PushParameters) (err error) { execRequired, err := syncAdapter.SyncFiles(syncParams) if err != nil { + componentStatus.State = watch.StateReady return fmt.Errorf("failed to sync to component with name %s: %w", a.ComponentName, err) } s.End(true) // PostStart events from the devfile will only be executed when the component // didn't previously exist - if !componentExists && libdevfile.HasPostStartEvents(a.Devfile) { + if !componentStatus.PostStartEventsDone && libdevfile.HasPostStartEvents(a.Devfile) { err = libdevfile.ExecPostStartEvents(a.Devfile, component.NewExecHandler(a.kubeClient, a.AppName, a.ComponentName, pod.Name, "", parameters.Show)) if err != nil { return err } } + componentStatus.PostStartEventsDone = true cmdKind := devfilev1.RunCommandGroupKind cmdName := parameters.DevfileRunCmd @@ -363,10 +282,10 @@ func (a Adapter) Push(parameters adapters.PushParameters) (err error) { commandType, cmd.Id) } - klog.V(4).Infof("running=%v, execRequired=%v, parameters.RunModeChanged=%v", - running, execRequired, parameters.RunModeChanged) + klog.V(4).Infof("running=%v, execRequired=%v", + running, execRequired) - if isComposite || !running || execRequired || parameters.RunModeChanged { + if isComposite || !running || execRequired { // Invoke the build command once (before calling libdevfile.ExecuteCommandByNameAndKind), as, if cmd is a composite command, // the handler we pass will be called for each command in that composite command. doExecuteBuildCommand := func() error { @@ -375,7 +294,7 @@ func (a Adapter) Push(parameters adapters.PushParameters) (err error) { return libdevfile.Build(a.Devfile, parameters.DevfileBuildCmd, execHandler) } if componentExists { - if parameters.RunModeChanged || cmd.Exec == nil || !util.SafeGetBool(cmd.Exec.HotReloadCapable) { + if cmd.Exec == nil || !util.SafeGetBool(cmd.Exec.HotReloadCapable) { if err = doExecuteBuildCommand(); err != nil { return err } @@ -391,26 +310,33 @@ func (a Adapter) Push(parameters adapters.PushParameters) (err error) { } } - if podChanged { + if podChanged || portsChanged { a.portForwardClient.StopPortForwarding() } err = a.portForwardClient.StartPortForwarding(a.Devfile, a.ComponentName, parameters.RandomPorts, parameters.ErrOut) if err != nil { - return fmt.Errorf("fail starting the port forwarding: %w", err) + return adapters.NewErrPortForward(err) } + componentStatus.EndpointsForwarded = a.portForwardClient.GetForwardedPorts() + componentStatus.State = watch.StateReady return nil } +// createOrUpdateComponent creates the deployment or updates it if it already exists +// with the expected spec. 
+// Returns the new deployment and if the generation of the deployment has been updated func (a *Adapter) createOrUpdateComponent( componentExists bool, ei envinfo.EnvSpecificInfo, - isMainStorageEphemeral bool, commands libdevfile.DevfileCommands, devfileDebugPort int, deployment *appsv1.Deployment, -) (*appsv1.Deployment, error) { +) (*appsv1.Deployment, bool, error) { + + isMainStorageEphemeral := a.prefClient.GetEphemeralSourceVolume() + ei.SetDevfileObj(a.Devfile) componentName := a.ComponentName @@ -422,13 +348,13 @@ func (a *Adapter) createOrUpdateComponent( // handle the ephemeral storage err := storage.HandleEphemeralStorage(a.kubeClient, storageClient, componentName, isMainStorageEphemeral) if err != nil { - return nil, err + return nil, false, err } // From devfile info, create PVCs and return ephemeral storages ephemerals, err := storagepkg.Push(storageClient, &ei) if err != nil { - return nil, err + return nil, false, err } // Set the labels @@ -441,10 +367,10 @@ func (a *Adapter) createOrUpdateComponent( containers, err := generator.GetContainers(a.Devfile, parsercommon.DevfileOptions{}) if err != nil { - return nil, err + return nil, false, err } if len(containers) == 0 { - return nil, fmt.Errorf("no valid components found in the devfile") + return nil, false, fmt.Errorf("no valid components found in the devfile") } // Add the project volume before generating init containers @@ -453,28 +379,28 @@ func (a *Adapter) createOrUpdateComponent( containers, err = utils.UpdateContainerEnvVars(a.Devfile, containers, commands.DebugCmd, devfileDebugPort) if err != nil { - return nil, err + return nil, false, err } containers, err = utils.UpdateContainersEntrypointsIfNeeded(a.Devfile, containers, commands.BuildCmd, commands.RunCmd, commands.DebugCmd) if err != nil { - return nil, err + return nil, false, err } initContainers, err := generator.GetInitContainers(a.Devfile) if err != nil { - return nil, err + return nil, false, err } // list all the pvcs for the component pvcs, err := a.kubeClient.ListPVCs(fmt.Sprintf("%v=%v", "component", componentName)) if err != nil { - return nil, err + return nil, false, err } odoSourcePVCName, volumeNameToVolInfo, err := storage.GetVolumeInfos(pvcs) if err != nil { - return nil, err + return nil, false, err } var allVolumes []corev1.Volume @@ -482,13 +408,13 @@ func (a *Adapter) createOrUpdateComponent( // Get PVC volumes and Volume Mounts pvcVolumes, err := storage.GetPersistentVolumesAndVolumeMounts(a.Devfile, containers, initContainers, volumeNameToVolInfo, parsercommon.DevfileOptions{}) if err != nil { - return nil, err + return nil, false, err } allVolumes = append(allVolumes, pvcVolumes...) ephemeralVolumes, err := storage.GetEphemeralVolumesAndVolumeMounts(a.Devfile, containers, initContainers, ephemerals, parsercommon.DevfileOptions{}) if err != nil { - return nil, err + return nil, false, err } allVolumes = append(allVolumes, ephemeralVolumes...) 
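The refactor in this file makes `createOrUpdateComponent` report, in addition to the Deployment, whether applying the Devfile bumped the Deployment's generation; `Push` uses that boolean to return early in `StateWaitDeployment` and resume on the next cluster event instead of blocking in the removed `WaitForDeploymentRollout`. The hunks below save the original generation and compare it after the apply; condensed into a standalone sketch (the helper name is hypothetical, not part of the patch):

```go
package component

import appsv1 "k8s.io/api/apps/v1"

// generationChanged is an illustrative helper, not part of the patch.
// A nil "before" Deployment means the component did not exist yet; its
// generation is treated as 0, so a freshly created Deployment
// (generation 1) also counts as an update to wait for.
func generationChanged(before, after *appsv1.Deployment) bool {
	var originalGeneration int64
	if before != nil {
		originalGeneration = before.GetGeneration()
	}
	return after.GetGeneration() != originalGeneration
}
```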
@@ -501,7 +427,7 @@ func (a *Adapter) createOrUpdateComponent( deploymentObjectMeta, err := a.generateDeploymentObjectMeta(deployment, labels, annotations) if err != nil { - return nil, err + return nil, false, err } deployParams := generator.DeploymentParams{ @@ -514,9 +440,15 @@ func (a *Adapter) createOrUpdateComponent( Replicas: pointer.Int32Ptr(1), } + // Save generation to check if deployment is updated later + var originalGeneration int64 = 0 + if deployment != nil { + originalGeneration = deployment.GetGeneration() + } + deployment, err = generator.GetDeployment(a.Devfile, deployParams) if err != nil { - return nil, err + return nil, false, err } if deployment.Annotations == nil { deployment.Annotations = make(map[string]string) @@ -533,7 +465,7 @@ func (a *Adapter) createOrUpdateComponent( serviceName, err := util.NamespaceKubernetesObjectWithTrim(componentName, a.AppName) if err != nil { - return nil, err + return nil, false, err } serviceObjectMeta := generator.GetObjectMeta(serviceName, a.kubeClient.GetCurrentNamespace(), labels, serviceAnnotations) serviceParams := generator.ServiceParams{ @@ -543,7 +475,7 @@ func (a *Adapter) createOrUpdateComponent( svc, err := generator.GetService(a.Devfile, serviceParams, parsercommon.DevfileOptions{}) if err != nil { - return nil, err + return nil, false, err } klog.V(2).Infof("Creating deployment %v", deployment.Spec.Template.GetName()) klog.V(2).Infof("The component name is %v", componentName) @@ -558,13 +490,13 @@ func (a *Adapter) createOrUpdateComponent( deployment, err = a.kubeClient.UpdateDeployment(*deployment) } if err != nil { - return nil, err + return nil, false, err } klog.V(2).Infof("Successfully updated component %v", componentName) ownerReference := generator.GetOwnerReference(deployment) err = a.createOrUpdateServiceForComponent(svc, componentName, ownerReference) if err != nil { - return nil, err + return nil, false, err } } else { if a.kubeClient.IsSSASupported() { @@ -574,7 +506,7 @@ func (a *Adapter) createOrUpdateComponent( } if err != nil { - return nil, err + return nil, false, err } klog.V(2).Infof("Successfully created component %v", componentName) @@ -587,14 +519,15 @@ func (a *Adapter) createOrUpdateComponent( return err }) if err != nil { - return nil, err + return nil, false, err } klog.V(2).Infof("Successfully created Service for component %s", componentName) } } + newGeneration := deployment.GetGeneration() - return deployment, nil + return deployment, newGeneration != originalGeneration, nil } func (a *Adapter) createOrUpdateServiceForComponent(svc *corev1.Service, componentName string, ownerReference metav1.OwnerReference) error { diff --git a/pkg/devfile/adapters/kubernetes/component/adapter_test.go b/pkg/devfile/adapters/kubernetes/component/adapter_test.go index 3991a62bd20..e5867d2192f 100644 --- a/pkg/devfile/adapters/kubernetes/component/adapter_test.go +++ b/pkg/devfile/adapters/kubernetes/component/adapter_test.go @@ -3,7 +3,6 @@ package component import ( "reflect" "testing" - "time" "github.com/devfile/library/pkg/devfile/parser/data" "github.com/golang/mock/gomock" @@ -27,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" ktesting "k8s.io/client-go/testing" ) @@ -131,8 +129,11 @@ func TestCreateOrUpdateComponent(t *testing.T) { Name: testComponentName, AppName: testAppName, }) - componentAdapter := NewKubernetesAdapter(fkclient, nil, nil, adapterCtx, "") - _, err := 
componentAdapter.createOrUpdateComponent(tt.running, tt.envInfo, false, libdevfile.DevfileCommands{}, 0, nil) + ctrl := gomock.NewController(t) + fakePrefClient := preference.NewMockClient(ctrl) + fakePrefClient.EXPECT().GetEphemeralSourceVolume() + componentAdapter := NewKubernetesAdapter(fkclient, fakePrefClient, nil, nil, adapterCtx, "") + _, _, err := componentAdapter.createOrUpdateComponent(tt.running, tt.envInfo, libdevfile.DevfileCommands{}, 0, nil) // Checks for unexpected error cases if !tt.wantErr == (err != nil) { @@ -247,80 +248,6 @@ func TestGetFirstContainerWithSourceVolume(t *testing.T) { } } -func TestWaitAndGetComponentPod(t *testing.T) { - - testComponentName := "test" - - tests := []struct { - name string - componentType devfilev1.ComponentType - status corev1.PodPhase - wantErr bool - }{ - { - name: "Case 1: Running", - status: corev1.PodRunning, - wantErr: false, - }, - { - name: "Case 2: Failed pod", - status: corev1.PodFailed, - wantErr: true, - }, - { - name: "Case 3: Unknown pod", - status: corev1.PodUnknown, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - devObj := devfileParser.DevfileObj{ - Data: func() data.DevfileData { - devfileData, err := data.NewDevfileData(string(data.APISchemaVersion200)) - if err != nil { - t.Error(err) - } - err = devfileData.AddComponents([]devfilev1.Component{testingutil.GetFakeContainerComponent("component")}) - if err != nil { - t.Error(err) - } - return devfileData - }(), - } - - adapterCtx := AdapterContext{ - ComponentName: testComponentName, - Devfile: devObj, - } - - fkclient, fkclientset := kclient.FakeNew() - fkWatch := watch.NewFake() - - // Change the status - go func() { - fkWatch.Modify(kclient.FakePodStatus(tt.status, testComponentName)) - }() - - fkclientset.Kubernetes.PrependWatchReactor("pods", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { - return true, fkWatch, nil - }) - - ctrl := gomock.NewController(t) - prefClient := preference.NewMockClient(ctrl) - prefClient.EXPECT().GetPushTimeout().Return(100 * time.Second) - componentAdapter := NewKubernetesAdapter(fkclient, prefClient, nil, adapterCtx, "") - _, err := componentAdapter.getPod(nil, false) - - // Checks for unexpected error cases - if !tt.wantErr == (err != nil) { - t.Errorf("component adapter create unexpected error %v, wantErr %v", err, tt.wantErr) - } - }) - } - -} - func getExecCommand(id string, group devfilev1.CommandGroupKind) devfilev1.Command { commands := [...]string{"ls -la", "pwd"} diff --git a/pkg/devfile/adapters/kubernetes/component/commandhandler.go b/pkg/devfile/adapters/kubernetes/component/commandhandler.go index afb4a101517..c91b4ba782e 100644 --- a/pkg/devfile/adapters/kubernetes/component/commandhandler.go +++ b/pkg/devfile/adapters/kubernetes/component/commandhandler.go @@ -66,7 +66,7 @@ func (a *adapterHandler) Execute(devfileCmd devfilev1.Command) error { // if we need to restart, issue the remote process handler command to stop all running commands first. // We do not need to restart Hot reload capable commands. 
if a.componentExists { - if a.parameters.RunModeChanged || devfileCmd.Exec == nil || !util.SafeGetBool(devfileCmd.Exec.HotReloadCapable) { + if devfileCmd.Exec == nil || !util.SafeGetBool(devfileCmd.Exec.HotReloadCapable) { klog.V(2).Infof("restart required for command %s", devfileCmd.Id) cmdDef, err := devfileCommandToRemoteCmdDefinition(devfileCmd) diff --git a/pkg/devfile/adapters/kubernetes/component/interface.go b/pkg/devfile/adapters/kubernetes/component/interface.go index c52044b12d4..c6be00f3483 100644 --- a/pkg/devfile/adapters/kubernetes/component/interface.go +++ b/pkg/devfile/adapters/kubernetes/component/interface.go @@ -1,8 +1,11 @@ package component -import "github.com/redhat-developer/odo/pkg/devfile/adapters" +import ( + "github.com/redhat-developer/odo/pkg/devfile/adapters" + "github.com/redhat-developer/odo/pkg/watch" +) // ComponentAdapter defines the functions that platform-specific adapters must implement type ComponentAdapter interface { - Push(parameters adapters.PushParameters) error + Push(parameters adapters.PushParameters, componentStatus *watch.ComponentStatus) error } diff --git a/pkg/devfile/adapters/kubernetes/component/push.go b/pkg/devfile/adapters/kubernetes/component/push.go new file mode 100644 index 00000000000..a1ed481e1d9 --- /dev/null +++ b/pkg/devfile/adapters/kubernetes/component/push.go @@ -0,0 +1,104 @@ +package component + +import ( + "fmt" + + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/redhat-developer/odo/pkg/component" + "github.com/redhat-developer/odo/pkg/devfile" + "github.com/redhat-developer/odo/pkg/devfile/adapters" + "github.com/redhat-developer/odo/pkg/kclient" + odolabels "github.com/redhat-developer/odo/pkg/labels" + "github.com/redhat-developer/odo/pkg/libdevfile" + "github.com/redhat-developer/odo/pkg/service" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// getComponentDeployment returns the deployment associated with the component, if deployed, +// and indicates whether the deployment has been found +func (a *Adapter) getComponentDeployment() (*appsv1.Deployment, bool, error) { + // Get the Dev deployment: + // Since `odo deploy` can theoretically deploy a deployment as well with the same instance name + // we make sure that we are retrieving the deployment with the Dev mode, NOT Deploy.
+ selectorLabels := odolabels.GetSelector(a.ComponentName, a.AppName, odolabels.ComponentDevMode) + deployment, err := a.kubeClient.GetOneDeploymentFromSelector(selectorLabels) + + if err != nil { + if _, ok := err.(*kclient.DeploymentNotFoundError); !ok { + return nil, false, fmt.Errorf("unable to determine if component %s exists: %w", a.ComponentName, err) + } + } + componentExists := deployment != nil + return deployment, componentExists, nil +} + +// pushDevfileKubernetesComponents gets the Kubernetes components from the Devfile and pushes them to the cluster, +// adding the specified labels to them +func (a *Adapter) pushDevfileKubernetesComponents( + labels map[string]string, +) ([]v1alpha2.Component, error) { + // fetch the "kubernetes inlined components" to create them on cluster + // from odo's standpoint, these components contain the YAML manifests of ServiceBindings + k8sComponents, err := devfile.GetKubernetesComponentsToPush(a.Devfile) + if err != nil { + return nil, fmt.Errorf("error while trying to fetch service(s) from devfile: %w", err) + } + + // validate if the GVRs represented by Kubernetes inlined components are supported by the underlying cluster + err = service.ValidateResourcesExist(a.kubeClient, a.Devfile, k8sComponents, a.Context) + if err != nil { + return nil, err + } + + // Set the annotations for the component type + annotations := make(map[string]string) + odolabels.SetProjectType(annotations, component.GetComponentTypeFromDevfileMetadata(a.AdapterContext.Devfile.Data.GetMetadata())) + + // create the Kubernetes objects from the manifest and delete the ones not in the devfile + err = service.PushKubernetesResources(a.kubeClient, a.Devfile, k8sComponents, labels, annotations, a.Context) + if err != nil { + return nil, fmt.Errorf("failed to create Kubernetes resources associated with the component: %w", err) + } + return k8sComponents, nil +} + +func (a *Adapter) getPushDevfileCommands(parameters adapters.PushParameters) (map[devfilev1.CommandGroupKind]devfilev1.Command, error) { + pushDevfileCommands, err := libdevfile.ValidateAndGetPushCommands(a.Devfile, parameters.DevfileBuildCmd, parameters.DevfileRunCmd) + if err != nil { + return nil, fmt.Errorf("failed to validate devfile build and run commands: %w", err) + } + + if parameters.Debug { + pushDevfileDebugCommands, e := libdevfile.ValidateAndGetCommand(a.Devfile, parameters.DevfileDebugCmd, devfilev1.DebugCommandGroupKind) + if e != nil { + return nil, fmt.Errorf("debug command is not valid: %w", e) + } + pushDevfileCommands[devfilev1.DebugCommandGroupKind] = pushDevfileDebugCommands + } + + return pushDevfileCommands, nil +} + +func (a *Adapter) updatePVCsOwnerReferences(ownerReference metav1.OwnerReference) error { + // list the latest state of the PVCs + pvcs, err := a.kubeClient.ListPVCs(fmt.Sprintf("%v=%v", "component", a.ComponentName)) + if err != nil { + return err + } + + // update the owner reference of the PVCs with the deployment + for i := range pvcs { + if pvcs[i].OwnerReferences != nil || pvcs[i].DeletionTimestamp != nil { + continue + } + err = a.kubeClient.TryWithBlockOwnerDeletion(ownerReference, func(ownerRef metav1.OwnerReference) error { + return a.kubeClient.UpdateStorageOwnerReference(&pvcs[i], ownerRef) + }) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/devfile/adapters/kubernetes/storage/utils.go b/pkg/devfile/adapters/kubernetes/storage/utils.go index 567d9091628..c6b9ce2b928 100644 --- a/pkg/devfile/adapters/kubernetes/storage/utils.go +++
b/pkg/devfile/adapters/kubernetes/storage/utils.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "sort" "strings" "github.com/devfile/library/pkg/devfile/generator" @@ -66,7 +67,16 @@ func GetPersistentVolumesAndVolumeMounts(devfileObj devfileParser.DevfileObj, co } var pvcVols []corev1.Volume - for volName, volInfo := range volumeNameToVolInfo { + + // We need to sort volumes to create Deployment in a deterministic way + keys := make([]string, 0, len(volumeNameToVolInfo)) + for k := range volumeNameToVolInfo { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, volName := range keys { + volInfo := volumeNameToVolInfo[volName] pvcVols = append(pvcVols, getPVC(volInfo.VolumeName, volInfo.PVCName)) // containerNameToMountPaths is a map of the Devfile container name to their Devfile Volume Mount Paths for a given Volume Name @@ -90,7 +100,16 @@ func GetEphemeralVolumesAndVolumeMounts(devfileObj devfileParser.DevfileObj, con return nil, err } var emptydirVols []corev1.Volume - for volName, volInfo := range ephemerals { + + // We need to sort volumes to create Deployment in a deterministic way + keys := make([]string, 0, len(ephemerals)) + for k := range ephemerals { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, volName := range keys { + volInfo := ephemerals[volName] emptyDir, err := getEmptyDir(volInfo.Name, volInfo.Spec.Size) if err != nil { return nil, err diff --git a/pkg/devfile/adapters/types.go b/pkg/devfile/adapters/types.go index 52f016a0fac..3ec4a21e52f 100644 --- a/pkg/devfile/adapters/types.go +++ b/pkg/devfile/adapters/types.go @@ -21,7 +21,6 @@ type PushParameters struct { EnvSpecificInfo envinfo.EnvSpecificInfo // EnvSpecificInfo contains information of env.yaml file Debug bool // Runs the component in debug mode DebugPort int // Port used for remote debugging - RunModeChanged bool // It determines if run mode is changed from run to debug or vice versa RandomPorts bool // True to forward containers ports on local random ports ErrOut io.Writer // Writer to output forwarded port information } diff --git a/pkg/kclient/binding.go b/pkg/kclient/binding.go index a0626491914..d5dc5fb7551 100644 --- a/pkg/kclient/binding.go +++ b/pkg/kclient/binding.go @@ -208,23 +208,29 @@ func (c Client) ListServiceBindingsFromAllGroups() ([]specApi.ServiceBinding, [] } specsU, err := c.ListDynamicResources("", specApi.GroupVersionResource) - if err != nil { - return nil, nil, err - } var specs specApi.ServiceBindingList - err = ConvertUnstructuredListToResource(*specsU, &specs) if err != nil { - return nil, nil, err + if !kerrors.IsForbidden(err) { + return nil, nil, err + } + } else { + err = ConvertUnstructuredListToResource(*specsU, &specs) + if err != nil { + return nil, nil, err + } } bindingsU, err := c.ListDynamicResources("", bindingApi.GroupVersionResource) - if err != nil { - return nil, nil, err - } var bindings bindingApi.ServiceBindingList - err = ConvertUnstructuredListToResource(*bindingsU, &bindings) if err != nil { - return nil, nil, err + if !kerrors.IsForbidden(err) { + return nil, nil, err + } + } else { + err = ConvertUnstructuredListToResource(*bindingsU, &bindings) + if err != nil { + return nil, nil, err + } } return specs.Items, bindings.Items, nil diff --git a/pkg/kclient/deployments.go b/pkg/kclient/deployments.go index 89c9e5fc369..5fc1e933e7a 100644 --- a/pkg/kclient/deployments.go +++ b/pkg/kclient/deployments.go @@ -3,9 +3,7 @@ package kclient import ( "context" "encoding/json" - "errors" "fmt" - "time" appsv1 "k8s.io/api/apps/v1" corev1 
"k8s.io/api/core/v1" @@ -14,11 +12,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" "k8s.io/klog" odolabels "github.com/redhat-developer/odo/pkg/labels" - - apiMachineryWatch "k8s.io/apimachinery/pkg/watch" ) func boolPtr(b bool) *bool { @@ -28,10 +25,6 @@ func boolPtr(b bool) *bool { const ( DeploymentKind = "Deployment" DeploymentAPIVersion = "apps/v1" - - // TimedOutReason is added in a deployment when its newest replica set fails to show any progress - // within the given deadline (progressDeadlineSeconds). - timedOutReason = "ProgressDeadlineExceeded" ) // GetDeploymentByName gets a deployment by querying by name @@ -89,125 +82,6 @@ func (c *Client) GetDeploymentFromSelector(selector string) ([]appsv1.Deployment return deploymentList.Items, nil } -// getDeploymentCondition returns the condition with the provided type -// from https://github.com/kubernetes/kubectl/blob/8bc20f428d7d5aed031de5fa160081de7b5af2b0/pkg/util/deployment/deployment.go#L58 -func getDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] - if c.Type == condType { - return &c - } - } - return nil -} - -// WaitForPodDeletion waits for the given pod to be deleted -func (c *Client) WaitForPodDeletion(name string) error { - watch, err := c.KubeClient.CoreV1().Pods(c.Namespace).Watch(context.TODO(), metav1.ListOptions{FieldSelector: "metadata.name=" + name}) - if err != nil { - return err - } - defer watch.Stop() - - if _, err = c.KubeClient.CoreV1().Pods(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}); kerrors.IsNotFound(err) { - return nil - } - - for { - select { - case <-time.After(time.Minute): - return fmt.Errorf("timeout while waiting for %q pod to be deleted", name) - - case val, ok := <-watch.ResultChan(): - if !ok { - return errors.New("error getting value from resultchan") - } - if val.Type == apiMachineryWatch.Deleted { - return nil - } - } - } -} - -// WaitForDeploymentRollout waits for deployment to finish rollout. Returns the state of the deployment after rollout. 
-func (c *Client) WaitForDeploymentRollout(deploymentName string) (*appsv1.Deployment, error) { - klog.V(3).Infof("Waiting for %s deployment rollout", deploymentName) - - w, err := c.KubeClient.AppsV1().Deployments(c.Namespace).Watch(context.TODO(), metav1.ListOptions{FieldSelector: "metadata.name=" + deploymentName}) - if err != nil { - return nil, fmt.Errorf("unable to watch deployment: %w", err) - } - defer w.Stop() - - success := make(chan *appsv1.Deployment) - failure := make(chan error) - - // Collect all the events in a separate go routine - failedEvents := make(map[string]corev1.Event) - quit := make(chan int) - go c.CollectEvents("", failedEvents, quit) - - go func() { - defer close(success) - defer close(failure) - - for { - val, ok := <-w.ResultChan() - if !ok { - failure <- errors.New("watch channel was closed") - return - } - // based on https://github.com/kubernetes/kubectl/blob/9a3954bf653c874c8af6f855f2c754a8e1a44b9e/pkg/polymorphichelpers/rollout_status.go#L66-L91 - if deployment, ok := val.Object.(*appsv1.Deployment); ok { - for _, cond := range deployment.Status.Conditions { - // using this just for debugging message, so ignoring error on purpose - jsonCond, _ := json.Marshal(cond) - klog.V(3).Infof("Deployment Condition: %s", string(jsonCond)) - } - if deployment.Generation <= deployment.Status.ObservedGeneration { - cond := getDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) - if cond != nil && cond.Reason == timedOutReason { - failure <- fmt.Errorf("deployment %q exceeded its progress deadline", deployment.Name) - } else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { - klog.V(3).Infof("Waiting for deployment %q rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Name, deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas) - } else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas { - klog.V(3).Infof("Waiting for deployment %q rollout to finish: %d old replicas are pending termination...\n", deployment.Name, deployment.Status.Replicas-deployment.Status.UpdatedReplicas) - } else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas { - klog.V(3).Infof("Waiting for deployment %q rollout to finish: %d of %d updated replicas are available...\n", deployment.Name, deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas) - } else { - klog.V(3).Infof("Deployment %q successfully rolled out\n", deployment.Name) - success <- deployment - } - } - klog.V(3).Infof("Waiting for deployment spec update to be observed...\n") - - } else { - failure <- errors.New("unable to convert event object to Pod") - } - } - }() - - select { - case val := <-success: - return val, nil - case err := <-failure: - return nil, err - case <-time.After(5 * time.Minute): - errorMessage := fmt.Sprintf("timeout while waiting for %s deployment roll out", deploymentName) - if len(failedEvents) != 0 { - tableString := getErrorMessageFromEvents(failedEvents) - - errorMessage = errorMessage + fmt.Sprintf(`\nFor more information to help determine the cause of the error, re-run with '-v'. 
-See below for a list of failed events that occured more than %d times during deployment: -%s`, failedEventCount, tableString.String()) - - return nil, fmt.Errorf(errorMessage) - } - - return nil, fmt.Errorf("timeout while waiting for %s deployment roll out", deploymentName) - } -} - func resourceAsJson(resource interface{}) string { data, _ := json.MarshalIndent(resource, " ", " ") return string(data) @@ -316,3 +190,13 @@ func (c *Client) GetDeploymentAPIVersion() (schema.GroupVersionKind, error) { func (c *Client) IsDeploymentExtensionsV1Beta1() (bool, error) { return c.IsResourceSupported("extensions", "v1beta1", "deployments") } + +// DeploymentWatcher returns a watcher on Deployments in the current namespace +// with the given label selector +func (o *Client) DeploymentWatcher(ctx context.Context, selector string) (watch.Interface, error) { + ns := o.GetCurrentNamespace() + return o.GetClient().AppsV1().Deployments(ns). + Watch(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) +}
diff --git a/pkg/kclient/dynamic.go b/pkg/kclient/dynamic.go index 6a0221a9666..4fd06d85fc4 100644 --- a/pkg/kclient/dynamic.go +++ b/pkg/kclient/dynamic.go @@ -21,19 +21,21 @@ import ( func (c *Client) PatchDynamicResource(resource unstructured.Unstructured) (bool, error) { klog.V(5).Infoln("Applying resource via server-side apply:") klog.V(5).Infoln(resourceAsJson(resource.Object)) - data, err := json.Marshal(resource.Object) + unversionedResource := resource.DeepCopy() + unversionedResource.SetResourceVersion("") + data, err := json.Marshal(unversionedResource.Object) if err != nil { return false, fmt.Errorf("unable to marshal resource: %w", err) } - gvr, err := c.GetRestMappingFromUnstructured(resource) + gvr, err := c.GetRestMappingFromUnstructured(*unversionedResource) if err != nil { return false, err } var previousGeneration int64 = -1 // Get the generation of the current resource - previous, err := c.DynamicClient.Resource(gvr.Resource).Namespace(c.Namespace).Get(context.TODO(), resource.GetName(), metav1.GetOptions{}) + previous, err := c.DynamicClient.Resource(gvr.Resource).Namespace(c.Namespace).Get(context.TODO(), unversionedResource.GetName(), metav1.GetOptions{}) if err != nil { if !kerrors.IsNotFound(err) { return false, err @@ -43,7 +45,7 @@ func (c *Client) PatchDynamicResource(resource unstructured.Unstructured) (bool, } // Patch the dynamic resource - current, err := c.DynamicClient.Resource(gvr.Resource).Namespace(c.Namespace).Patch(context.TODO(), resource.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{FieldManager: FieldManager, Force: boolPtr(true)}) + current, err := c.DynamicClient.Resource(gvr.Resource).Namespace(c.Namespace).Patch(context.TODO(), unversionedResource.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{FieldManager: FieldManager, Force: boolPtr(true)}) if err != nil { return false, err }
diff --git a/pkg/kclient/events.go b/pkg/kclient/events.go index 856f9d5e090..5a0dd728925 100644 --- a/pkg/kclient/events.go +++ b/pkg/kclient/events.go @@ -2,65 +2,35 @@ package kclient import ( "context" - "sync" - corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog" - - "github.com/redhat-developer/odo/pkg/log" + "k8s.io/apimachinery/pkg/watch" ) -// We use a mutex here in order to make 100% sure that functions such as CollectEvents -// so that there are no race conditions -var mu sync.Mutex - -const ( - failedEventCount = 5 -) - -// CollectEvents collects events in a Goroutine by manipulating a spinner. -// We don't care about the error (it's usually ran in a go routine), so erroring out is not needed. -func (c *Client) CollectEvents(selector string, events map[string]corev1.Event, quit <-chan int) { - - // Secondly, we will start a go routine for watching for events related to the pod and update our pod status accordingly. - eventWatcher, err := c.KubeClient.CoreV1().Events(c.Namespace).Watch(context.TODO(), metav1.ListOptions{}) - if err != nil { - log.Warningf("Unable to watch for events: %s", err) - return - } - defer eventWatcher.Stop() - - // Create an endless loop for collecting - for { - select { - case <-quit: - klog.V(3).Info("Quitting collect events") - return - case val, ok := <-eventWatcher.ResultChan(): - mu.Lock() - if !ok { - log.Warning("Watch channel was closed") - return - } - if e, ok := val.Object.(*corev1.Event); ok { +type NoOpWatch struct{} - // If there are many warning events happening during deployment, let's log them. - if e.Type == "Warning" { +func (o NoOpWatch) Stop() {} - if e.Count >= failedEventCount { - newEvent := e - (events)[e.Name] = *newEvent - klog.V(3).Infof("Warning Event: Count: %d, Reason: %s, Message: %s", e.Count, e.Reason, e.Message) - } +func (o NoOpWatch) ResultChan() <-chan watch.Event { + return make(chan watch.Event) +} - } +// PodWarningEventWatcher watches for Warning events related to Pods in the current namespace. If the watch is forbidden, a NoOp +// implementation of watch.Interface is returned +func (c *Client) PodWarningEventWatcher(ctx context.Context) (result watch.Interface, isForbidden bool, err error) { + selector := "involvedObject.kind=Pod,involvedObject.apiVersion=v1,type=Warning" + ns := c.GetCurrentNamespace() + result, err = c.GetClient().CoreV1().Events(ns). + Watch(ctx, metav1.ListOptions{ + FieldSelector: selector, + }) - } else { - log.Warning("Unable to convert object to event") - return - } - mu.Unlock() + if err != nil { + if kerrors.IsForbidden(err) { + return NoOpWatch{}, true, nil } + return nil, false, err } + return result, false, nil }
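Editorial note: both new watchers above return a plain watch.Interface, so a single consumer can multiplex Deployment changes and Pod warning events in one select loop. A minimal sketch, assuming a hypothetical observe helper; the NoOpWatch fallback needs no special casing because its ResultChan never delivers an event:

```go
package sketch // illustrative only, not part of the patch

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// observe drains two watchers, e.g. the results of DeploymentWatcher and
// PodWarningEventWatcher, until the context is cancelled or a channel closes.
func observe(ctx context.Context, deployments, warnings watch.Interface) {
	defer deployments.Stop()
	defer warnings.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case ev, ok := <-deployments.ResultChan():
			if !ok {
				return
			}
			if d, isDeployment := ev.Object.(*appsv1.Deployment); isDeployment {
				fmt.Printf("deployment %s: event %s\n", d.Name, ev.Type)
			}
		case ev, ok := <-warnings.ResultChan():
			if !ok {
				return
			}
			// NoOpWatch's channel never fires, so this branch stays idle when
			// the event watch was forbidden by RBAC.
			if e, isEvent := ev.Object.(*corev1.Event); isEvent {
				fmt.Printf("warning on %s: %s\n", e.InvolvedObject.Name, e.Message)
			}
		}
	}
}
```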
diff --git a/pkg/kclient/events_test.go b/pkg/kclient/events_test.go deleted file mode 100644 index 4c8f338620b..00000000000 --- a/pkg/kclient/events_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package kclient - -import ( - "fmt" - "strings" - "testing" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - ktesting "k8s.io/client-go/testing" -) - -func fakeEventStatus(podName string, eventWarningMessage string, count int32) *corev1.Event { - return &corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - }, - Type: "Warning", - Count: count, - Reason: eventWarningMessage, - Message: "Foobar", - } -} - -func TestCollectEvents(t *testing.T) { - tests := []struct { - name string - podName string - eventWarningMessage string - }{ - { - name: "Case 1: Collect an arbitrary amount of events", - podName: "ruby", - eventWarningMessage: "Fake event warning message", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - // Create a fake client - fakeClient, fakeClientSet := FakeNew() - fakeEventWatch := watch.NewRaceFreeFake() - podSelector := fmt.Sprintf("deploymentconfig=%s", tt.podName) - - // Create a fake event status / watch reactor for faking the events we are collecting - fakeEvent := fakeEventStatus(tt.podName, tt.eventWarningMessage, 10) - go func(event *corev1.Event) { - fakeEventWatch.Add(event) - }(fakeEvent) - -
fakeClientSet.Kubernetes.PrependWatchReactor("events", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { - return true, fakeEventWatch, nil - }) - - events := make(map[string]corev1.Event) - quit := make(chan int) - go fakeClient.CollectEvents(podSelector, events, quit) - - // Sleep in order to make sure we actually collect some events - time.Sleep(2 * time.Second) - close(quit) - - // We make sure to lock in order to prevent race conditions when retrieving the events (since they are a pointer - // by default since we pass in a map) - mu.Lock() - if len(events) == 0 { - t.Errorf("Expected events, got none") - } - mu.Unlock() - - // Collect the first event in the map - var firstEvent corev1.Event - for _, val := range events { - firstEvent = val - } - - if !strings.Contains(firstEvent.Reason, tt.eventWarningMessage) { - t.Errorf("expected warning message: '%s' in event message: '%+v'", tt.eventWarningMessage, firstEvent.Reason) - } - - }) - } -} diff --git a/pkg/kclient/interface.go b/pkg/kclient/interface.go index 5a82de33608..ffbaba42e6f 100644 --- a/pkg/kclient/interface.go +++ b/pkg/kclient/interface.go @@ -1,6 +1,7 @@ package kclient import ( + "context" "io" "time" @@ -13,6 +14,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -48,13 +50,12 @@ type ClientInterface interface { GetOneDeployment(componentName, appName string) (*appsv1.Deployment, error) GetOneDeploymentFromSelector(selector string) (*appsv1.Deployment, error) GetDeploymentFromSelector(selector string) ([]appsv1.Deployment, error) - WaitForPodDeletion(name string) error - WaitForDeploymentRollout(deploymentName string) (*appsv1.Deployment, error) CreateDeployment(deploy appsv1.Deployment) (*appsv1.Deployment, error) UpdateDeployment(deploy appsv1.Deployment) (*appsv1.Deployment, error) ApplyDeployment(deploy appsv1.Deployment) (*appsv1.Deployment, error) GetDeploymentAPIVersion() (schema.GroupVersionKind, error) IsDeploymentExtensionsV1Beta1() (bool, error) + DeploymentWatcher(ctx context.Context, selector string) (watch.Interface, error) // dynamic.go PatchDynamicResource(exampleCustomResource unstructured.Unstructured) (bool, error) @@ -64,7 +65,7 @@ type ClientInterface interface { DeleteDynamicResource(name string, gvr schema.GroupVersionResource, wait bool) error // events.go - CollectEvents(selector string, events map[string]corev1.Event, quit <-chan int) + PodWarningEventWatcher(ctx context.Context) (result watch.Interface, isForbidden bool, err error) // kclient.go GetClient() kubernetes.Interface @@ -106,14 +107,15 @@ type ClientInterface interface { TryWithBlockOwnerDeletion(ownerReference metav1.OwnerReference, exec func(ownerReference metav1.OwnerReference) error) error // pods.go - WaitAndGetPodWithEvents(selector string, desiredPhase corev1.PodPhase, pushTimeout time.Duration) (*corev1.Pod, error) ExecCMDInContainer(containerName, podName string, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error ExtractProjectToComponent(containerName, podName string, targetPath string, stdin io.Reader) error GetPodUsingComponentName(componentName string) (*corev1.Pod, error) - GetOnePodFromSelector(selector string) (*corev1.Pod, error) + GetRunningPodFromSelector(selector string) (*corev1.Pod, error) GetPodLogs(podName, containerName string, 
followLog bool) (io.ReadCloser, error) GetAllPodsInNamespace() (*corev1.PodList, error) GetPodsMatchingSelector(selector string) (*corev1.PodList, error) + PodWatcher(ctx context.Context, selector string) (watch.Interface, error) + IsPodNameMatchingSelector(ctx context.Context, podname string, selector string) (bool, error) // port_forwarding.go // SetupPortForwarding creates port-forwarding for the pod on the port pairs provided in the @@ -157,6 +159,5 @@ type ClientInterface interface { ListPVCNames(selector string) ([]string, error) GetPVCFromName(pvcName string) (*corev1.PersistentVolumeClaim, error) UpdatePVCLabels(pvc *corev1.PersistentVolumeClaim, labels map[string]string) error - GetAndUpdateStorageOwnerReference(pvc *corev1.PersistentVolumeClaim, ownerReference ...metav1.OwnerReference) error UpdateStorageOwnerReference(pvc *corev1.PersistentVolumeClaim, ownerReference ...metav1.OwnerReference) error } diff --git a/pkg/kclient/mock_Client.go b/pkg/kclient/mock_Client.go index 1fbbe5951ec..63b1d2582c2 100644 --- a/pkg/kclient/mock_Client.go +++ b/pkg/kclient/mock_Client.go @@ -5,6 +5,7 @@ package kclient import ( + context "context" io "io" reflect "reflect" time "time" @@ -22,6 +23,7 @@ import ( v12 "k8s.io/apimachinery/pkg/apis/meta/v1" unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" schema "k8s.io/apimachinery/pkg/runtime/schema" + watch "k8s.io/apimachinery/pkg/watch" discovery "k8s.io/client-go/discovery" dynamic "k8s.io/client-go/dynamic" kubernetes "k8s.io/client-go/kubernetes" @@ -96,18 +98,6 @@ func (mr *MockClientInterfaceMockRecorder) ApplyDeployment(deploy interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyDeployment", reflect.TypeOf((*MockClientInterface)(nil).ApplyDeployment), deploy) } -// CollectEvents mocks base method. -func (m *MockClientInterface) CollectEvents(selector string, events map[string]v11.Event, quit <-chan int) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "CollectEvents", selector, events, quit) -} - -// CollectEvents indicates an expected call of CollectEvents. -func (mr *MockClientInterfaceMockRecorder) CollectEvents(selector, events, quit interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CollectEvents", reflect.TypeOf((*MockClientInterface)(nil).CollectEvents), selector, events, quit) -} - // CreateDeployment mocks base method. func (m *MockClientInterface) CreateDeployment(deploy v10.Deployment) (*v10.Deployment, error) { m.ctrl.T.Helper() @@ -309,6 +299,21 @@ func (mr *MockClientInterfaceMockRecorder) DeleteService(serviceName interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteService", reflect.TypeOf((*MockClientInterface)(nil).DeleteService), serviceName) } +// DeploymentWatcher mocks base method. +func (m *MockClientInterface) DeploymentWatcher(ctx context.Context, selector string) (watch.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeploymentWatcher", ctx, selector) + ret0, _ := ret[0].(watch.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeploymentWatcher indicates an expected call of DeploymentWatcher. +func (mr *MockClientInterfaceMockRecorder) DeploymentWatcher(ctx, selector interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeploymentWatcher", reflect.TypeOf((*MockClientInterface)(nil).DeploymentWatcher), ctx, selector) +} + // ExecCMDInContainer mocks base method. 
func (m *MockClientInterface) ExecCMDInContainer(containerName, podName string, cmd []string, stdout, stderr io.Writer, stdin io.Reader, tty bool) error { m.ctrl.T.Helper() @@ -381,25 +386,6 @@ func (mr *MockClientInterfaceMockRecorder) GetAllResourcesFromSelector(selector, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllResourcesFromSelector", reflect.TypeOf((*MockClientInterface)(nil).GetAllResourcesFromSelector), selector, ns) } -// GetAndUpdateStorageOwnerReference mocks base method. -func (m *MockClientInterface) GetAndUpdateStorageOwnerReference(pvc *v11.PersistentVolumeClaim, ownerReference ...v12.OwnerReference) error { - m.ctrl.T.Helper() - varargs := []interface{}{pvc} - for _, a := range ownerReference { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetAndUpdateStorageOwnerReference", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// GetAndUpdateStorageOwnerReference indicates an expected call of GetAndUpdateStorageOwnerReference. -func (mr *MockClientInterfaceMockRecorder) GetAndUpdateStorageOwnerReference(pvc interface{}, ownerReference ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{pvc}, ownerReference...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAndUpdateStorageOwnerReference", reflect.TypeOf((*MockClientInterface)(nil).GetAndUpdateStorageOwnerReference), varargs...) -} - // GetBindableKindStatusRestMapping mocks base method. func (m *MockClientInterface) GetBindableKindStatusRestMapping(bindableKindStatuses []v1alpha10.BindableKindsStatus) ([]*meta.RESTMapping, error) { m.ctrl.T.Helper() @@ -723,21 +709,6 @@ func (mr *MockClientInterfaceMockRecorder) GetOneDeploymentFromSelector(selector return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOneDeploymentFromSelector", reflect.TypeOf((*MockClientInterface)(nil).GetOneDeploymentFromSelector), selector) } -// GetOnePodFromSelector mocks base method. -func (m *MockClientInterface) GetOnePodFromSelector(selector string) (*v11.Pod, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOnePodFromSelector", selector) - ret0, _ := ret[0].(*v11.Pod) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetOnePodFromSelector indicates an expected call of GetOnePodFromSelector. -func (mr *MockClientInterfaceMockRecorder) GetOnePodFromSelector(selector interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOnePodFromSelector", reflect.TypeOf((*MockClientInterface)(nil).GetOnePodFromSelector), selector) -} - // GetOneService mocks base method. func (m *MockClientInterface) GetOneService(componentName, appName string) (*v11.Service, error) { m.ctrl.T.Helper() @@ -903,6 +874,21 @@ func (mr *MockClientInterfaceMockRecorder) GetRestMappingFromUnstructured(arg0 i return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRestMappingFromUnstructured", reflect.TypeOf((*MockClientInterface)(nil).GetRestMappingFromUnstructured), arg0) } +// GetRunningPodFromSelector mocks base method. +func (m *MockClientInterface) GetRunningPodFromSelector(selector string) (*v11.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRunningPodFromSelector", selector) + ret0, _ := ret[0].(*v11.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRunningPodFromSelector indicates an expected call of GetRunningPodFromSelector. 
+func (mr *MockClientInterfaceMockRecorder) GetRunningPodFromSelector(selector interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRunningPodFromSelector", reflect.TypeOf((*MockClientInterface)(nil).GetRunningPodFromSelector), selector) +} + // GetSecret mocks base method. func (m *MockClientInterface) GetSecret(name, namespace string) (*v11.Secret, error) { m.ctrl.T.Helper() @@ -994,6 +980,21 @@ func (mr *MockClientInterfaceMockRecorder) IsDeploymentExtensionsV1Beta1() *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDeploymentExtensionsV1Beta1", reflect.TypeOf((*MockClientInterface)(nil).IsDeploymentExtensionsV1Beta1)) } +// IsPodNameMatchingSelector mocks base method. +func (m *MockClientInterface) IsPodNameMatchingSelector(ctx context.Context, podname, selector string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsPodNameMatchingSelector", ctx, podname, selector) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsPodNameMatchingSelector indicates an expected call of IsPodNameMatchingSelector. +func (mr *MockClientInterfaceMockRecorder) IsPodNameMatchingSelector(ctx, podname, selector interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPodNameMatchingSelector", reflect.TypeOf((*MockClientInterface)(nil).IsPodNameMatchingSelector), ctx, podname, selector) +} + // IsProjectSupported mocks base method. func (m *MockClientInterface) IsProjectSupported() (bool, error) { m.ctrl.T.Helper() @@ -1204,6 +1205,37 @@ func (mr *MockClientInterfaceMockRecorder) PatchDynamicResource(exampleCustomRes return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PatchDynamicResource", reflect.TypeOf((*MockClientInterface)(nil).PatchDynamicResource), exampleCustomResource) } +// PodWarningEventWatcher mocks base method. +func (m *MockClientInterface) PodWarningEventWatcher(ctx context.Context) (watch.Interface, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PodWarningEventWatcher", ctx) + ret0, _ := ret[0].(watch.Interface) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PodWarningEventWatcher indicates an expected call of PodWarningEventWatcher. +func (mr *MockClientInterfaceMockRecorder) PodWarningEventWatcher(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PodWarningEventWatcher", reflect.TypeOf((*MockClientInterface)(nil).PodWarningEventWatcher), ctx) +} + +// PodWatcher mocks base method. +func (m *MockClientInterface) PodWatcher(ctx context.Context, selector string) (watch.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PodWatcher", ctx, selector) + ret0, _ := ret[0].(watch.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PodWatcher indicates an expected call of PodWatcher. +func (mr *MockClientInterfaceMockRecorder) PodWatcher(ctx, selector interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PodWatcher", reflect.TypeOf((*MockClientInterface)(nil).PodWatcher), ctx, selector) +} + // RunLogout mocks base method. 
func (m *MockClientInterface) RunLogout(stdout io.Writer) error { m.ctrl.T.Helper() @@ -1376,21 +1408,6 @@ func (mr *MockClientInterfaceMockRecorder) UpdateStorageOwnerReference(pvc inter return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateStorageOwnerReference", reflect.TypeOf((*MockClientInterface)(nil).UpdateStorageOwnerReference), varargs...) } -// WaitAndGetPodWithEvents mocks base method. -func (m *MockClientInterface) WaitAndGetPodWithEvents(selector string, desiredPhase v11.PodPhase, pushTimeout time.Duration) (*v11.Pod, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitAndGetPodWithEvents", selector, desiredPhase, pushTimeout) - ret0, _ := ret[0].(*v11.Pod) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// WaitAndGetPodWithEvents indicates an expected call of WaitAndGetPodWithEvents. -func (mr *MockClientInterfaceMockRecorder) WaitAndGetPodWithEvents(selector, desiredPhase, pushTimeout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitAndGetPodWithEvents", reflect.TypeOf((*MockClientInterface)(nil).WaitAndGetPodWithEvents), selector, desiredPhase, pushTimeout) -} - // WaitAndGetSecret mocks base method. func (m *MockClientInterface) WaitAndGetSecret(name, namespace string) (*v11.Secret, error) { m.ctrl.T.Helper() @@ -1406,35 +1423,6 @@ func (mr *MockClientInterfaceMockRecorder) WaitAndGetSecret(name, namespace inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitAndGetSecret", reflect.TypeOf((*MockClientInterface)(nil).WaitAndGetSecret), name, namespace) } -// WaitForDeploymentRollout mocks base method. -func (m *MockClientInterface) WaitForDeploymentRollout(deploymentName string) (*v10.Deployment, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitForDeploymentRollout", deploymentName) - ret0, _ := ret[0].(*v10.Deployment) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// WaitForDeploymentRollout indicates an expected call of WaitForDeploymentRollout. -func (mr *MockClientInterfaceMockRecorder) WaitForDeploymentRollout(deploymentName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeploymentRollout", reflect.TypeOf((*MockClientInterface)(nil).WaitForDeploymentRollout), deploymentName) -} - -// WaitForPodDeletion mocks base method. -func (m *MockClientInterface) WaitForPodDeletion(name string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitForPodDeletion", name) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitForPodDeletion indicates an expected call of WaitForPodDeletion. -func (mr *MockClientInterfaceMockRecorder) WaitForPodDeletion(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForPodDeletion", reflect.TypeOf((*MockClientInterface)(nil).WaitForPodDeletion), name) -} - // WaitForServiceAccountInNamespace mocks base method. 
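As with the other generated methods, tests program the regenerated mock through its recorder. A hedged example (test name and selector are invented for illustration):

```go
package kclient_test // hypothetical test, for illustration only

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"k8s.io/apimachinery/pkg/watch"

	"github.com/redhat-developer/odo/pkg/kclient"
)

func TestDeploymentWatcherMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	client := kclient.NewMockClientInterface(ctrl)
	fakeWatch := watch.NewFake() // a test-controlled watch.Interface
	defer fakeWatch.Stop()

	// Expect exactly one call with any context and this selector.
	client.EXPECT().
		DeploymentWatcher(gomock.Any(), "app=my-app").
		Return(fakeWatch, nil).
		Times(1)

	w, err := client.DeploymentWatcher(context.Background(), "app=my-app")
	if err != nil || w != fakeWatch {
		t.Fatalf("unexpected result: %v, %v", w, err)
	}
}
```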
func (m *MockClientInterface) WaitForServiceAccountInNamespace(namespace, serviceAccountName string) error { m.ctrl.T.Helper() diff --git a/pkg/kclient/pods.go b/pkg/kclient/pods.go index 5a2bfdba00e..5044b569b50 100644 --- a/pkg/kclient/pods.go +++ b/pkg/kclient/pods.go @@ -3,12 +3,9 @@ package kclient import ( "bytes" "context" - "encoding/json" - "errors" "fmt" "io" "strings" - "time" "k8s.io/klog" @@ -18,106 +15,11 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/remotecommand" ) -// WaitAndGetPod block and waits until pod matching selector is in in Running state -// desiredPhase cannot be PodFailed or PodUnknown -func (c *Client) WaitAndGetPodWithEvents(selector string, desiredPhase corev1.PodPhase, pushTimeout time.Duration) (*corev1.Pod, error) { - - klog.V(3).Infof("Waiting for %s pod", selector) - - var spinner *log.Status - defer func() { - if spinner != nil { - spinner.End(false) - } - }() - - w, err := c.KubeClient.CoreV1().Pods(c.Namespace).Watch(context.TODO(), metav1.ListOptions{ - LabelSelector: selector, - }) - if err != nil { - return nil, fmt.Errorf("unable to watch pod: %w", err) - } - defer w.Stop() - - // Here we are going to start a loop watching for the pod status - podChannel := make(chan *corev1.Pod) - watchErrorChannel := make(chan error) - failedEvents := make(map[string]corev1.Event) - go func() { - loop: - for { - val, ok := <-w.ResultChan() - if !ok { - watchErrorChannel <- errors.New("watch channel was closed") - break loop - } - if e, ok := val.Object.(*corev1.Pod); ok { - klog.V(3).Infof("Status of %s pod is %s", e.Name, e.Status.Phase) - for _, cond := range e.Status.Conditions { - // using this just for debugging message, so ignoring error on purpose - jsonCond, _ := json.Marshal(cond) - klog.V(3).Infof("Pod Conditions: %s", string(jsonCond)) - } - for _, status := range e.Status.ContainerStatuses { - // using this just for debugging message, so ignoring error on purpose - jsonStatus, _ := json.Marshal(status) - klog.V(3).Infof("Container Status: %s", string(jsonStatus)) - } - switch e.Status.Phase { - case desiredPhase: - klog.V(3).Infof("Pod %s is %v", e.Name, desiredPhase) - podChannel <- e - break loop - case corev1.PodFailed, corev1.PodUnknown: - watchErrorChannel <- fmt.Errorf("pod %s status %s", e.Name, e.Status.Phase) - break loop - default: - // we start in a phase different from the desired one, let's wait - // Collect all the events in a separate go routine - quit := make(chan int) - go c.CollectEvents(selector, failedEvents, quit) - defer close(quit) - } - } else { - watchErrorChannel <- errors.New("unable to convert event object to Pod") - break loop - } - } - close(podChannel) - close(watchErrorChannel) - }() - - select { - case val := <-podChannel: - if spinner != nil { - spinner.End(true) - } - return val, nil - case err := <-watchErrorChannel: - return nil, err - case <-time.After(pushTimeout): - - // Create a useful error if there are any failed events - errorMessage := fmt.Sprintf(`waited %s but couldn't find running pod matching selector: '%s'`, pushTimeout, selector) - - if len(failedEvents) != 0 { - - tableString := getErrorMessageFromEvents(failedEvents) - - errorMessage = fmt.Sprintf(`waited %s but was unable to find a running pod matching selector: '%s' -For more information to help determine the cause of the error, re-run with '-v'. 
-See below for a list of failed events that occured more than %d times during deployment: -%s`, pushTimeout, selector, failedEventCount, tableString.String()) - } - - return nil, fmt.Errorf(errorMessage) - } -} - // ExecCMDInContainer execute command in the container of a pod, pass an empty string for containerName to execute in the first container of the pod func (c *Client) ExecCMDInContainer(containerName, podName string, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error { podExecOptions := corev1.PodExecOptions{ @@ -186,13 +88,14 @@ func (c *Client) ExtractProjectToComponent(containerName, podName string, target // GetPodUsingComponentName gets a pod using the component name func (c *Client) GetPodUsingComponentName(componentName string) (*corev1.Pod, error) { podSelector := fmt.Sprintf("component=%s", componentName) - return c.GetOnePodFromSelector(podSelector) + return c.GetRunningPodFromSelector(podSelector) } -// GetOnePodFromSelector gets a pod from the selector -func (c *Client) GetOnePodFromSelector(selector string) (*corev1.Pod, error) { +// GetRunningPodFromSelector gets a running pod matching the given selector +func (c *Client) GetRunningPodFromSelector(selector string) (*corev1.Pod, error) { pods, err := c.KubeClient.CoreV1().Pods(c.Namespace).List(context.TODO(), metav1.ListOptions{ LabelSelector: selector, + FieldSelector: "status.phase=Running", }) if err != nil { // Don't wrap error since we want to know if it's a forbidden error @@ -248,3 +151,23 @@ func (c *Client) GetAllPodsInNamespace() (*corev1.PodList, error) { func (c *Client) GetPodsMatchingSelector(selector string) (*corev1.PodList, error) { return c.KubeClient.CoreV1().Pods(c.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector}) } + +func (c *Client) PodWatcher(ctx context.Context, selector string) (watch.Interface, error) { + ns := c.GetCurrentNamespace() + return c.GetClient().CoreV1().Pods(ns). + Watch(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) +} + +func (c *Client) IsPodNameMatchingSelector(ctx context.Context, podname string, selector string) (bool, error) { + ns := c.GetCurrentNamespace() + list, err := c.GetClient().CoreV1().Pods(ns).List(ctx, metav1.ListOptions{ + FieldSelector: "metadata.name=" + podname, + LabelSelector: selector, + }) + if err != nil { + return false, err + } + return len(list.Items) > 0, nil +}
diff --git a/pkg/kclient/pods_test.go b/pkg/kclient/pods_test.go index ff935f17331..0d9924a0355 100644 --- a/pkg/kclient/pods_test.go +++ b/pkg/kclient/pods_test.go @@ -8,97 +8,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" - "github.com/redhat-developer/odo/pkg/preference" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - ktesting "k8s.io/client-go/testing" ) -func fakePodStatus(status corev1.PodPhase, podName string) *corev1.Pod { - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - }, - Status: corev1.PodStatus{ - Phase: status, - }, - } -} - -// NOTE: We do *not* collection the amount of actions taken in this function as there could be any number of fake -// 'event' actions that are happening in the background.
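The behavioral change here is that the pod lookup now filters server-side on phase: the label selector narrows by component while the field selector keeps only Running pods. A minimal standalone sketch of that combination (names are illustrative):

```go
package sketch // illustrative only

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listRunning lists pods the way GetRunningPodFromSelector now does: the
// label selector picks the component, and the field selector is evaluated by
// the API server, dropping pods in Pending, Succeeded, Failed or Unknown phase.
func listRunning(ctx context.Context, clientset kubernetes.Interface, ns, labelSelector string) error {
	pods, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
		LabelSelector: labelSelector,          // e.g. "component=nodejs"
		FieldSelector: "status.phase=Running", // server-side filtering
	})
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		fmt.Println(p.Name)
	}
	return nil
}
```

Note that a pod being deleted can still report phase Running; the watch code introduced later in this patch (pkg/watch/pod_phases.go) therefore also checks the deletion timestamp.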
-func TestWaitAndGetPodWithEvents(t *testing.T) { - tests := []struct { - name string - podName string - status corev1.PodPhase - wantEventWarning bool - wantErr bool - eventWarningMessage string - }{ - { - name: "Case 1: Pod running", - podName: "ruby", - status: corev1.PodRunning, - wantEventWarning: false, - wantErr: false, - }, - { - name: "Case 2: Pod failed", - podName: "ruby", - status: corev1.PodFailed, - wantEventWarning: false, - wantErr: true, - }, - { - name: "Case 3: Pod unknown", - podName: "ruby", - status: corev1.PodUnknown, - wantEventWarning: false, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - fakeClient, fakeClientSet := FakeNew() - fakePodWatch := watch.NewRaceFreeFake() - - // Watch for Pods - fakePod := fakePodStatus(tt.status, tt.podName) - go func(pod *corev1.Pod) { - fakePodWatch.Modify(pod) - }(fakePod) - - // Prepend watch reactor (beginning of the chain) - fakeClientSet.Kubernetes.PrependWatchReactor("pods", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { - return true, fakePodWatch, nil - }) - - podSelector := fmt.Sprintf("deploymentconfig=%s", tt.podName) - - pod, err := fakeClient.WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, preference.DefaultPushTimeout) - - if !tt.wantErr == (err != nil) { - t.Errorf("client.WaitAndGetPod(string) unexpected error %v, wantErr %v", err, tt.wantErr) - return - } - - if err == nil { - if pod.Name != tt.podName { - t.Errorf("pod name is not matching to expected name, expected: %s, got %s", tt.podName, pod.Name) - } - } - - }) - } -} - func TestGetOnePodFromSelector(t *testing.T) { fakePod := FakePodStatus(corev1.PodRunning, "nodejs") fakePod.Labels["component"] = "nodejs" @@ -174,7 +89,7 @@ func TestGetOnePodFromSelector(t *testing.T) { return true, tt.returnedPods, nil }) - got, err := fkclient.GetOnePodFromSelector(tt.args.selector) + got, err := fkclient.GetRunningPodFromSelector(tt.args.selector) if (err != nil) != tt.wantErr { t.Errorf("GetOnePodFromSelector() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/kclient/volumes.go b/pkg/kclient/volumes.go index 839c200df60..91498a94ef4 100644 --- a/pkg/kclient/volumes.go +++ b/pkg/kclient/volumes.go @@ -2,12 +2,14 @@ package kclient import ( "context" + "encoding/json" "errors" "fmt" "github.com/devfile/library/pkg/devfile/generator" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) // constants for volumes @@ -72,26 +74,6 @@ func (c *Client) UpdatePVCLabels(pvc *corev1.PersistentVolumeClaim, labels map[s return nil } -// GetAndUpdateStorageOwnerReference updates the given storage with the given owner references -func (c *Client) GetAndUpdateStorageOwnerReference(pvc *corev1.PersistentVolumeClaim, ownerReference ...metav1.OwnerReference) error { - if len(ownerReference) <= 0 { - return errors.New("owner references are empty") - } - // get the latest version of the PVC to avoid conflict errors - latestPVC, err := c.KubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) - if err != nil { - return err - } - for _, owRf := range ownerReference { - latestPVC.SetOwnerReferences(append(pvc.GetOwnerReferences(), owRf)) - } - _, err = c.KubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Update(context.TODO(), latestPVC, metav1.UpdateOptions{FieldManager: FieldManager}) - if err != nil { - return err - } - return nil -} - // UpdateStorageOwnerReference updates 
the given storage with the given owner references func (c *Client) UpdateStorageOwnerReference(pvc *corev1.PersistentVolumeClaim, ownerReference ...metav1.OwnerReference) error { if len(ownerReference) <= 0 { @@ -106,9 +88,23 @@ func (c *Client) UpdateStorageOwnerReference(pvc *corev1.PersistentVolumeClaim, updatedPVC.OwnerReferences = ownerReference updatedPVC.Spec = pvc.Spec - _, err := c.KubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Update(context.TODO(), updatedPVC, metav1.UpdateOptions{FieldManager: FieldManager}) - if err != nil { - return err + if c.IsSSASupported() { + updatedPVC.APIVersion, updatedPVC.Kind = corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").ToAPIVersionAndKind() + updatedPVC.SetManagedFields(nil) + updatedPVC.SetResourceVersion("") + data, err := json.Marshal(updatedPVC) + if err != nil { + return fmt.Errorf("unable to marshal PVC: %w", err) + } + _, err = c.KubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Patch(context.TODO(), updatedPVC.Name, types.ApplyPatchType, data, metav1.PatchOptions{FieldManager: FieldManager, Force: boolPtr(true)}) + if err != nil { + return err + } + } else { + _, err := c.KubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Update(context.TODO(), updatedPVC, metav1.UpdateOptions{FieldManager: FieldManager}) + if err != nil { + return err + } } return nil }
diff --git a/pkg/kclient/volumes_test.go b/pkg/kclient/volumes_test.go index e5b689bcec9..6ef8c8bc7ee 100644 --- a/pkg/kclient/volumes_test.go +++ b/pkg/kclient/volumes_test.go @@ -347,57 +347,3 @@ func TestListPVCNames(t *testing.T) { }) } } - -func TestUpdateStorageOwnerReference(t *testing.T) { - fakeDeployment := testingutil.CreateFakeDeployment("nodejs") - type args struct { - pvc *corev1.PersistentVolumeClaim - ownerReference []metav1.OwnerReference - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "case 1: valid pvc", - args: args{ - pvc: testingutil.FakePVC("pvc-1", "1Gi", map[string]string{}), - ownerReference: []metav1.OwnerReference{ - generator.GetOwnerReference(fakeDeployment), - }, - }, - wantErr: false, - }, - { - name: "case 2: empty owner reference for pvc", - args: args{ - pvc: testingutil.FakePVC("pvc-1", "1Gi", map[string]string{}), - ownerReference: []metav1.OwnerReference{}, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fakeClient, fakeClientSet := FakeNew() - - fakeClientSet.Kubernetes.PrependReactor("get", "persistentvolumeclaims", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - returnedPVC := *tt.args.pvc - return true, &returnedPVC, nil - }) - - fakeClientSet.Kubernetes.PrependReactor("update", "persistentvolumeclaims", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - pvc := action.(ktesting.UpdateAction).GetObject().(*corev1.PersistentVolumeClaim) - if pvc.OwnerReferences == nil || pvc.OwnerReferences[0].Name != fakeDeployment.Name { - t.Errorf("owner reference not set for dc %s", tt.args.pvc.Name) - } - return true, pvc, nil - }) - - if err := fakeClient.GetAndUpdateStorageOwnerReference(tt.args.pvc, tt.args.ownerReference...); (err != nil) != tt.wantErr { - t.Errorf("updateStorageOwnerReference() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -}
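Both this hunk and the PatchDynamicResource change earlier follow the same server-side apply discipline: set TypeMeta, clear managedFields and resourceVersion, then send an apply patch. A sketch of the pattern in isolation (the client, namespace and field manager are illustrative; a payload carrying managedFields or a stale resourceVersion would be rejected by the server):

```go
package sketch // illustrative only

import (
	"context"
	"encoding/json"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// applyPVC mirrors the SSA branch above for a typed object.
func applyPVC(ctx context.Context, clientset kubernetes.Interface, ns string, pvc *corev1.PersistentVolumeClaim) error {
	// Apply payloads must carry apiVersion/kind explicitly.
	pvc.APIVersion, pvc.Kind = corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").ToAPIVersionAndKind()
	pvc.SetManagedFields(nil)  // apply patches must not include managedFields
	pvc.SetResourceVersion("") // let the server resolve the current version
	data, err := json.Marshal(pvc)
	if err != nil {
		return err
	}
	force := true
	_, err = clientset.CoreV1().PersistentVolumeClaims(ns).
		Patch(ctx, pvc.Name, types.ApplyPatchType, data,
			metav1.PatchOptions{FieldManager: "odo", Force: &force})
	return err
}
```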
diff --git a/pkg/log/status.go b/pkg/log/status.go index 8b3438ca9f7..6a131f2f433 100644 --- a/pkg/log/status.go +++ b/pkg/log/status.go @@ -250,9 +250,15 @@ func Successf(format string, a ...interface{}) { // Warning will output in an appropriate "progress" manner // ⚠ func Warning(a ...interface{}) { + Fwarning(GetStderr(), a...) +} + +// Fwarning will output in an appropriate "progress" manner to the given writer +// ⚠ +func Fwarning(out io.Writer, a ...interface{}) { if !IsJSON() { yellow := color.New(color.FgYellow).SprintFunc() - fmt.Fprintf(GetStderr(), "%s%s%s%s", prefixSpacing, yellow(getWarningString()), suffixSpacing, fmt.Sprintln(a...)) + fmt.Fprintf(out, "%s%s%s%s", prefixSpacing, yellow(getWarningString()), suffixSpacing, fmt.Sprintln(a...)) } } @@ -265,6 +271,15 @@ func Warningf(format string, a ...interface{}) { } } +// Fsuccess will output in an appropriate "progress" manner to the given writer +// ✓ +func Fsuccess(out io.Writer, a ...interface{}) { + if !IsJSON() { + green := color.New(color.FgGreen).SprintFunc() + fmt.Fprintf(out, "%s%s%s%s", prefixSpacing, green(getSuccessString()), suffixSpacing, fmt.Sprintln(a...)) + } +} + // Title Prints the logo as well as the first line being BLUE (indicator of the command information) // the second and third lines are optional and provide information with regards to what is being ran // __
diff --git a/pkg/odo/cli/dev/dev.go b/pkg/odo/cli/dev/dev.go index 00105c2559d..d213478d808 100644 --- a/pkg/odo/cli/dev/dev.go +++ b/pkg/odo/cli/dev/dev.go @@ -29,6 +29,7 @@ import ( "github.com/redhat-developer/odo/pkg/odo/genericclioptions/clientset" odoutil "github.com/redhat-developer/odo/pkg/odo/util" scontext "github.com/redhat-developer/odo/pkg/segment/context" + "github.com/redhat-developer/odo/pkg/util" "github.com/redhat-developer/odo/pkg/vars" "github.com/redhat-developer/odo/pkg/version" "github.com/redhat-developer/odo/pkg/watch" @@ -180,15 +181,6 @@ func (o *DevOptions) Complete(cmdline cmdline.Cmdline, args []string) error { o.clientset.KubernetesClient.SetNamespace(o.GetProject()) - // 3 steps to evaluate the paths to be ignored when "watching" the pwd/cwd for changes - // 1. create an empty string slice to which paths like .gitignore, .odo/odo-file-index.json, etc.
will be added - var ignores []string - err = genericclioptions.ApplyIgnore(&ignores, "") - if err != nil { - return err - } - o.ignorePaths = ignores - return nil } @@ -221,49 +213,70 @@ func (o *DevOptions) Run(ctx context.Context) (err error) { "Namespace: "+namespace, "odo version: "+version.VERSION) - log.Section("Deploying to the cluster in developer mode") - err = o.clientset.DevClient.Start(devFileObj, namespace, o.ignorePaths, path, o.debugFlag, o.buildCommandFlag, o.runCommandFlag, o.randomPortsFlag, o.errOut) + // check for .gitignore file and add odo-file-index.json to .gitignore + gitIgnoreFile, err := util.TouchGitIgnoreFile(path) if err != nil { return err } - log.Info("\nYour application is now running on the cluster") + // add odo-file-index.json path to .gitignore + err = util.AddOdoFileIndex(gitIgnoreFile) + if err != nil { + return err + } + + // add devstate.json path to .gitignore + err = util.AddOdoDevState(gitIgnoreFile) + if err != nil { + return err + } + + var ignores []string + err = genericclioptions.ApplyIgnore(&ignores, "") + if err != nil { + return err + } + // Ignore the devfile, as it will be handled independently + o.ignorePaths = ignores + + log.Section("Deploying to the cluster in developer mode") + componentStatus, err := o.clientset.DevClient.Start(devFileObj, namespace, o.ignorePaths, path, o.debugFlag, o.buildCommandFlag, o.runCommandFlag, o.randomPortsFlag, o.errOut) + if err != nil { + return err + } scontext.SetComponentType(ctx, component.GetComponentTypeFromDevfileMetadata(devFileObj.Data.GetMetadata())) scontext.SetLanguage(ctx, devFileObj.Data.GetMetadata().Language) scontext.SetProjectType(ctx, devFileObj.Data.GetMetadata().ProjectType) scontext.SetDevfileName(ctx, devFileObj.GetMetadataName()) - if o.noWatchFlag { - log.Finfof(log.GetStdout(), "\n"+watch.CtrlCMessage) - <-o.ctx.Done() - err = o.clientset.WatchClient.CleanupDevResources(devFileObj, log.GetStdout()) - } else { - d := Handler{ - clientset: *o.clientset, - randomPorts: o.randomPortsFlag, - errOut: o.errOut, - } - err = o.clientset.DevClient.Watch( - devFileObj, - path, - o.ignorePaths, - o.out, - &d, - o.ctx, - o.debugFlag, - o.buildCommandFlag, - o.runCommandFlag, - o.variables, - o.randomPortsFlag, - o.errOut, - ) + d := Handler{ + clientset: *o.clientset, + randomPorts: o.randomPortsFlag, + errOut: o.errOut, } + err = o.clientset.DevClient.Watch( + o.GetDevfilePath(), + devFileObj, + path, + o.ignorePaths, + o.out, + &d, + o.ctx, + o.debugFlag, + o.buildCommandFlag, + o.runCommandFlag, + o.variables, + o.randomPortsFlag, + !o.noWatchFlag, + o.errOut, + componentStatus, + ) return err } // RegenerateAdapterAndPush regenerates the adapter and pushes the files to remote pod -func (o *Handler) RegenerateAdapterAndPush(pushParams adapters.PushParameters, watchParams watch.WatchParameters) error { +func (o *Handler) RegenerateAdapterAndPush(pushParams adapters.PushParameters, watchParams watch.WatchParameters, componentStatus *watch.ComponentStatus) error { var adapter kcomponent.ComponentAdapter adapter, err := o.regenerateComponentAdapterFromWatchParams(watchParams) @@ -271,7 +284,7 @@ func (o *Handler) RegenerateAdapterAndPush(pushParams adapters.PushParameters, w return fmt.Errorf("unable to generate component from watch parameters: %w", err) } - err = adapter.Push(pushParams) + err = adapter.Push(pushParams, componentStatus) if err != nil { return fmt.Errorf("watch command was unable to push component: %w", err) } @@ -289,6 +302,7 @@ func (o *Handler) 
regenerateComponentAdapterFromWatchParams(parameters watch.Wat o.clientset.KubernetesClient, o.clientset.PreferenceClient, o.clientset.PortForwardClient, + o.clientset.BindingClient, kcomponent.AdapterContext{ ComponentName: parameters.ComponentName, Context: parameters.Path, @@ -331,6 +345,7 @@ It forwards endpoints with exposure values 'public' or 'internal' to a port on l devCmd.Flags().StringVar(&o.runCommandFlag, "run-command", "", "Alternative run command to execute. The default one will be used if this flag is not set.") clientset.Add(devCmd, + clientset.BINDING, clientset.DEV, clientset.FILESYSTEM, clientset.INIT, diff --git a/pkg/odo/genericclioptions/clientset/clientset.go b/pkg/odo/genericclioptions/clientset/clientset.go index 409e3d3c9a1..bb4673f12bd 100644 --- a/pkg/odo/genericclioptions/clientset/clientset.go +++ b/pkg/odo/genericclioptions/clientset/clientset.go @@ -75,14 +75,14 @@ var subdeps map[string][]string = map[string][]string{ ALIZER: {REGISTRY}, DELETE_COMPONENT: {KUBERNETES}, DEPLOY: {KUBERNETES}, - DEV: {KUBERNETES, PORT_FORWARD, PREFERENCE, WATCH}, + DEV: {BINDING, KUBERNETES, PORT_FORWARD, PREFERENCE, WATCH}, INIT: {ALIZER, FILESYSTEM, PREFERENCE, REGISTRY}, LOGS: {KUBERNETES}, PORT_FORWARD: {KUBERNETES, STATE}, PROJECT: {KUBERNETES_NULLABLE}, REGISTRY: {FILESYSTEM, PREFERENCE}, STATE: {FILESYSTEM}, - WATCH: {DELETE_COMPONENT, STATE}, + WATCH: {KUBERNETES, DELETE_COMPONENT, STATE}, BINDING: {PROJECT, KUBERNETES}, /* Add sub-dependencies here, if any */ } @@ -172,7 +172,7 @@ func Fetch(command *cobra.Command) (*Clientset, error) { dep.StateClient = state.NewStateClient(dep.FS) } if isDefined(command, WATCH) { - dep.WatchClient = watch.NewWatchClient(dep.DeleteClient, dep.StateClient) + dep.WatchClient = watch.NewWatchClient(dep.KubernetesClient, dep.DeleteClient, dep.StateClient) } if isDefined(command, BINDING) { dep.BindingClient = binding.NewBindingClient(dep.ProjectClient, dep.KubernetesClient) @@ -181,7 +181,7 @@ func Fetch(command *cobra.Command) (*Clientset, error) { dep.PortForwardClient = portForward.NewPFClient(dep.KubernetesClient, dep.StateClient) } if isDefined(command, DEV) { - dep.DevClient = dev.NewDevClient(dep.KubernetesClient, dep.PreferenceClient, dep.PortForwardClient, dep.WatchClient) + dep.DevClient = dev.NewDevClient(dep.KubernetesClient, dep.PreferenceClient, dep.PortForwardClient, dep.WatchClient, dep.BindingClient) } /* Instantiate new clients here. 
Take care to instantiate after all sub-dependencies */
diff --git a/pkg/portForward/interface.go b/pkg/portForward/interface.go index ec2a5885271..48803d64af1 100644 --- a/pkg/portForward/interface.go +++ b/pkg/portForward/interface.go @@ -20,4 +20,10 @@ type Client interface { // StopPortForwarding stops the port forwarding StopPortForwarding() + + // GetForwardedPorts returns the list of ports currently forwarded for each container + GetForwardedPorts() map[string][]int + + // GetPortsToForward returns the endpoints to forward from the Devfile + GetPortsToForward(devFileObj parser.DevfileObj) (map[string][]int, error) }
diff --git a/pkg/portForward/portForward.go b/pkg/portForward/portForward.go index 844a7f6288f..7101fc2fb0f 100644 --- a/pkg/portForward/portForward.go +++ b/pkg/portForward/portForward.go @@ -44,14 +44,11 @@ func (o *PFClient) StartPortForwarding( errOut io.Writer, ) error { - // get the endpoint/port information for containers in devfile and setup port-forwarding - containers, err := devFileObj.Data.GetComponents(parsercommon.DevfileOptions{ - ComponentOptions: parsercommon.ComponentOptions{ComponentType: v1alpha2.ContainerComponentType}, - }) + ceMapping, err := o.GetPortsToForward(devFileObj) if err != nil { return err } - ceMapping := libdevfile.GetContainerEndpointMapping(containers) + if o.stopChan != nil && reflect.DeepEqual(ceMapping, o.appliedEndpoints) { return nil } @@ -106,6 +103,23 @@ func (o *PFClient) StopPortForwarding() { o.finishedChan = nil } +func (o *PFClient) GetForwardedPorts() map[string][]int { + return o.appliedEndpoints +} + +func (o *PFClient) GetPortsToForward(devFileObj parser.DevfileObj) (map[string][]int, error) { + + // get the endpoint/port information for containers in devfile + containers, err := devFileObj.Data.GetComponents(parsercommon.DevfileOptions{ + ComponentOptions: parsercommon.ComponentOptions{ComponentType: v1alpha2.ContainerComponentType}, + }) + if err != nil { + return nil, err + } + ceMapping := libdevfile.GetContainerEndpointMapping(containers) + return ceMapping, nil +} + // randomPortPairsFromContainerEndpoints assigns a random (empty) port on localhost to each port in the provided containerEndpoints map // it returns a map of the format "<container-name>":{"<local-port-1>:<container-port-1>", "<local-port-2>:<container-port-2>"} // "container1": {":3000", ":3001"}
diff --git a/pkg/service/link.go b/pkg/service/link.go index 1f44810c925..a73ee2bb1c7 100644 --- a/pkg/service/link.go +++ b/pkg/service/link.go @@ -31,12 +31,11 @@ import ( ) // PushLinks updates Link(s) from Kubernetes Inlined component in a devfile by creating new ones or removing old ones -// returns true if the component needs to be restarted (when a link has been created or deleted) // if service binding operator is not present, it will call pushLinksWithoutOperator to create the links without it.
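The two port-forwarding getters above expose the same container-to-ports map that StartPortForwarding compares against its applied state. A small sketch of that comparison, with invented values (the real keys come from the devfile's container components):

```go
package sketch // illustrative only

import (
	"fmt"
	"reflect"
)

func demo() {
	// Shape returned by GetPortsToForward: container name -> container ports.
	desired := map[string][]int{"runtime": {3000, 5858}}
	// Shape returned by GetForwardedPorts: what is currently forwarded.
	applied := map[string][]int{"runtime": {3000}}

	// StartPortForwarding returns early when both maps are deep-equal; any
	// difference means the forwarding has to be torn down and set up again.
	if !reflect.DeepEqual(desired, applied) {
		fmt.Println("endpoints changed, restarting port forwarding")
	}
}
```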
-func PushLinks(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8sComponents []devfile.Component, labels map[string]string, deployment *v1.Deployment, context string) (bool, error) { +func PushLinks(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8sComponents []devfile.Component, labels map[string]string, deployment *v1.Deployment, context string) error { serviceBindingSupport, err := client.IsServiceBindingSupported() if err != nil { - return false, err + return err } if !serviceBindingSupport { @@ -48,13 +47,12 @@ func PushLinks(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8s } // pushLinksWithOperator creates links or deletes links (if service binding operator is installed) between components and services -// returns true if the component needs to be restarted (a secret was generated and added to the deployment) -func pushLinksWithOperator(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8sComponents []devfile.Component, labels map[string]string, deployment *v1.Deployment, context string) (bool, error) { +func pushLinksWithOperator(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8sComponents []devfile.Component, labels map[string]string, deployment *v1.Deployment, context string) error { ownerReference := generator.GetOwnerReference(deployment) deployed, err := ListDeployedServices(client, labels) if err != nil { - return false, err + return err } for key, deployedResource := range deployed { @@ -63,21 +61,19 @@ func pushLinksWithOperator(client kclient.ClientInterface, devfileObj parser.Dev } } - restartNeeded := false - // create an object on the kubernetes cluster for all the Kubernetes Inlined components var strCRD string for _, c := range k8sComponents { // get the string representation of the YAML definition of a CRD strCRD, err = libdevfile.GetK8sManifestWithVariablesSubstituted(devfileObj, c.Name, context, devfilefs.DefaultFs{}) if err != nil { - return false, err + return err } // convert the YAML definition into map[string]interface{} since it's needed to create dynamic resource u := unstructured.Unstructured{} if e := yaml.Unmarshal([]byte(strCRD), &u.Object); e != nil { - return false, e + return e } if !isLinkResource(u.GetKind()) { @@ -90,22 +86,16 @@ func pushLinksWithOperator(client kclient.ClientInterface, devfileObj parser.Dev u.SetOwnerReferences([]metav1.OwnerReference{ownerReference}) u.SetLabels(labels) - var updated bool - updated, err = updateOperatorService(client, u) + _, err = updateOperatorService(client, u) delete(deployed, u.GetKind()+"/"+crdName) if err != nil { if strings.Contains(err.Error(), "already exists") { // TODO: better way to handle this might be introduced by https://github.com/redhat-developer/odo/issues/4553 continue // this ensures that services slice is not updated } else { - return false, err + return err } } - - // uncomment/modify when service linking is enabled in v3 - // name := u.GetName() - // log.Successf("Created link %q using Service Binding Operator on the cluster; component will be restarted", name) - restartNeeded = restartNeeded || updated } for key, val := range deployed { @@ -114,30 +104,27 @@ func pushLinksWithOperator(client kclient.ClientInterface, devfileObj parser.Dev } err = DeleteOperatorService(client, key) if err != nil { - return false, err + return err } - // uncomment/modify when service linking is enabled in v3 - // log.Successf("Deleted link %q using Service Binding Operator on the cluster; component will be restarted", key) - 
restartNeeded = true } - return restartNeeded, nil + return nil } // pushLinksWithoutOperator creates links or deletes links (if service binding operator is not installed) between components and services // returns true if the component needs to be restarted (a secret was generated and added to the deployment) -func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8sComponents []devfile.Component, labels map[string]string, deployment *v1.Deployment, context string) (bool, error) { +func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8sComponents []devfile.Component, labels map[string]string, deployment *v1.Deployment, context string) error { // check csv support before proceeding csvSupport, err := client.IsCSVSupported() if err != nil { - return false, err + return err } secrets, err := client.ListSecrets(odolabels.GetSelector(odolabels.GetComponentName(labels), odolabels.GetAppName(labels), odolabels.ComponentAnyMode)) if err != nil { - return false, err + return err } ownerReferences := generator.GetOwnerReference(deployment) @@ -156,13 +143,13 @@ func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser. // get the string representation of the YAML definition of a CRD strCRD, err = libdevfile.GetK8sManifestWithVariablesSubstituted(devfileObj, c.Name, context, devfilefs.DefaultFs{}) if err != nil { - return false, err + return err } // convert the YAML definition into map[string]interface{} since it's needed to create dynamic resource u := unstructured.Unstructured{} if e := yaml.Unmarshal([]byte(strCRD), &u.Object); e != nil { - return false, e + return e } if !isLinkResource(u.GetKind()) { @@ -176,11 +163,9 @@ func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser. deploymentGVK, err := client.GetDeploymentAPIVersion() if err != nil { - return false, err + return err } - var restartRequired bool - // delete the links not present on the devfile for linkName, secretName := range clusterLinksMap { if _, ok := localLinksMap[linkName]; !ok { @@ -208,23 +193,20 @@ func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser. if processingPipeline == nil { processingPipeline, err = getPipeline(client) if err != nil { - return false, err + return err } } _, err = processingPipeline.Process(&newServiceBinding) if err != nil { - return false, err + return err } // since the library currently doesn't delete the secret after unbinding // delete the secret manually err = client.DeleteSecret(secretName, client.GetCurrentNamespace()) if err != nil { - return false, err + return err } - restartRequired = true - // uncomment/modify when service linking is enabled in v3 - // log.Successf("Deleted link %q on the cluster; component will be restarted", linkName) } } @@ -237,7 +219,7 @@ func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser. // prevent listing of services unless required services, e := client.ListServices("") if e != nil { - return false, e + return e } // get the services and get match them against the component @@ -251,7 +233,7 @@ func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser. var serviceBinding sboApi.ServiceBinding err = yaml.Unmarshal([]byte(strCRD), &serviceBinding) if err != nil { - return false, err + return err } if len(serviceBinding.Spec.Services) != 1 { @@ -271,13 +253,13 @@ func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser. 
_, err = json.MarshalIndent(serviceBinding, " ", " ") if err != nil { - return false, err + return err } if processingPipeline == nil { processingPipeline, err = getPipeline(client) if err != nil { - return false, err + return err } } @@ -285,19 +267,19 @@ func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser. if err != nil { if kerrors.IsForbidden(err) { // due to https://github.com/redhat-developer/service-binding-operator/issues/1003 - return false, fmt.Errorf("please install the service binding operator") + return fmt.Errorf("please install the service binding operator") } - return false, err + return err } if len(serviceBinding.Status.Secret) == 0 { - return false, fmt.Errorf("no secret was provided by service binding's pipleine") + return fmt.Errorf("no secret was provided by service binding's pipeline") } // get the generated secret and update it with the labels and owner reference secret, err := client.GetSecret(serviceBinding.Status.Secret, client.GetCurrentNamespace()) if err != nil { - return false, err + return err } secret.Labels = labels secret.Labels[LinkLabel] = linkName @@ -314,19 +296,12 @@ func pushLinksWithoutOperator(client kclient.ClientInterface, devfileObj parser. secret.SetOwnerReferences([]metav1.OwnerReference{ownerReferences}) _, err = client.UpdateSecret(secret, client.GetCurrentNamespace()) if err != nil { - return false, err + return err } - restartRequired = true - // uncomment/modify when service linking is enabled in v3 - // log.Successf("Created link %q on the cluster; component will be restarted", linkName) } } - if restartRequired { - return true, nil - } - - return false, nil + return nil } // getPipeline gets the pipeline to process service binding requests
diff --git a/pkg/service/service.go b/pkg/service/service.go index f896045df3b..696c2fabd8f 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -380,15 +380,15 @@ func isLinkResource(kind string) bool { func updateOperatorService(client kclient.ClientInterface, u unstructured.Unstructured) (bool, error) { // Create the service on cluster - createSpinner := log.Spinnerf("Creating kind %s", u.GetKind()) - defer createSpinner.End(false) - updated, err := client.PatchDynamicResource(u) if err != nil { return false, err } - createSpinner.End(true) + if updated { + createSpinner := log.Spinnerf("Creating kind %s", u.GetKind()) + createSpinner.End(true) + } return updated, err }
diff --git a/pkg/util/file_indexer.go b/pkg/util/file_indexer.go index ccbc6710319..99bba556023 100644 --- a/pkg/util/file_indexer.go +++ b/pkg/util/file_indexer.go @@ -20,6 +20,7 @@ import ( const DotOdoDirectory = ".odo" const fileIndexName = "odo-file-index.json" +const fileDevState = "devstate.json" // FileIndex holds the file index used for storing local file state change type FileIndex struct { @@ -105,6 +106,15 @@ func addOdoFileIndex(gitIgnoreFile string, fs filesystem.Filesystem) error { return addFileToIgnoreFile(gitIgnoreFile, filepath.Join(DotOdoDirectory, fileIndexName), fs) } +// AddOdoDevState adds devstate.json to .gitignore +func AddOdoDevState(gitIgnoreFile string) error { + return addOdoDevState(gitIgnoreFile, filesystem.DefaultFs{}) +} + +func addOdoDevState(gitIgnoreFile string, fs filesystem.Filesystem) error { + return addFileToIgnoreFile(gitIgnoreFile, filepath.Join(DotOdoDirectory, fileDevState), fs) +} +
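For reference, the sequence odo dev now runs before watching (shown in pkg/odo/cli/dev/dev.go earlier) boils down to these three helpers; a minimal sketch, assuming the component directory is writable and the hypothetical ensureIgnored wrapper:

```go
package sketch // illustrative only

import (
	"fmt"

	"github.com/redhat-developer/odo/pkg/util"
)

func ensureIgnored(componentPath string) error {
	gitIgnoreFile, err := util.TouchGitIgnoreFile(componentPath) // creates .gitignore if missing
	if err != nil {
		return err
	}
	if err = util.AddOdoFileIndex(gitIgnoreFile); err != nil { // adds .odo/odo-file-index.json
		return err
	}
	if err = util.AddOdoDevState(gitIgnoreFile); err != nil { // adds .odo/devstate.json
		return err
	}
	fmt.Println("ignore entries written to", gitIgnoreFile)
	return nil
}
```

The filepath.ToSlash call added in the next hunk keeps both entries forward-slashed in .gitignore even on Windows.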
touchGitIgnoreFile(directory, filesystem.DefaultFs{}) @@ -216,18 +226,6 @@ func RunIndexerWithRemote(directory string, originalIgnoreRules []string, remote return ret, err } - // check for .gitignore file and add odo-file-index.json to .gitignore - gitIgnoreFile, err := TouchGitIgnoreFile(directory) - if err != nil { - return ret, err - } - - // add odo-file-index.json path to .gitignore - err = AddOdoFileIndex(gitIgnoreFile) - if err != nil { - return ret, err - } - // read the odo index file existingFileIndex, err := ReadFileIndex(ret.ResolvedPath) if err != nil { diff --git a/pkg/util/util.go b/pkg/util/util.go index 3455ea021c0..34625356754 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -446,6 +446,7 @@ func sliceContainsString(str string, slice []string) bool { func addFileToIgnoreFile(gitIgnoreFile, filename string, fs filesystem.Filesystem) error { var data []byte + filename = filepath.ToSlash(filename) file, err := fs.OpenFile(gitIgnoreFile, os.O_APPEND|os.O_RDWR, dfutil.ModeReadWriteFile) if err != nil { return fmt.Errorf("failed to open .gitignore file: %w", err) } diff --git a/pkg/watch/backo.go b/pkg/watch/backo.go new file mode 100644 index 00000000000..a54adefcd4f --- /dev/null +++ b/pkg/watch/backo.go @@ -0,0 +1,30 @@ +package watch + +import ( + "time" + + "github.com/segmentio/backo-go" + "k8s.io/klog" +) + +type ExpBackoff struct { + attempt int + backo *backo.Backo +} + +func NewExpBackoff() *ExpBackoff { + return &ExpBackoff{ + backo: backo.DefaultBacko(), + } +} + +func (o *ExpBackoff) Delay() time.Duration { + duration := o.backo.Duration(o.attempt) + klog.V(4).Infof("wait for %v\n", duration) + o.attempt++ + return duration +} + +func (o *ExpBackoff) Reset() { + o.attempt = 0 +} diff --git a/pkg/watch/interface.go b/pkg/watch/interface.go index 49c01467e50..df574b1c75e 100644 --- a/pkg/watch/interface.go +++ b/pkg/watch/interface.go @@ -10,7 +10,10 @@ type Client interface { // WatchAndPush watches the component under the context directory and triggers Push if there are any changes // It also listens on ctx's Done channel to trigger cleanup when indicated to do so - WatchAndPush(out io.Writer, parameters WatchParameters, ctx context.Context) error + // componentStatus is used to store the status of the component, and is passed between + // parts of the code (unfortunately, there is no Kubernetes resource in which this status + // could be stored, as is generally done for Kubernetes resources) + WatchAndPush(out io.Writer, parameters WatchParameters, ctx context.Context, componentStatus ComponentStatus) error // CleanupDevResources deletes the component created using the devfileObj and writes any outputs to out CleanupDevResources(devfileObj parser.DevfileObj, out io.Writer) error } diff --git a/pkg/watch/mock.go b/pkg/watch/mock.go index 0a29c55959d..72904a5ffba 100644 --- a/pkg/watch/mock.go +++ b/pkg/watch/mock.go @@ -51,15 +51,15 @@ func (mr *MockClientMockRecorder) CleanupDevResources(devfileObj, out interface{ } // WatchAndPush mocks base method.
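The new pkg/watch/backo.go above wraps segmentio/backo-go: each consecutive Delay() call returns a longer wait, and Reset() restarts the schedule once an operation succeeds. Below is a minimal, self-contained sketch of the intended call pattern; it substitutes a plain doubling delay for the backo-go library, and pushOnce is a hypothetical stand-in for a push attempt, not a function from this repository.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// expBackoff mirrors the shape of watch.ExpBackoff, with a plain doubling
// delay standing in for the segmentio/backo-go schedule (an assumption for
// illustration only).
type expBackoff struct {
	attempt int
}

// Delay returns a wait that doubles with each consecutive call.
func (o *expBackoff) Delay() time.Duration {
	d := time.Duration(1<<o.attempt) * 100 * time.Millisecond
	o.attempt++
	return d
}

// Reset restarts the schedule so the next Delay is the shortest one again.
func (o *expBackoff) Reset() {
	o.attempt = 0
}

// pushOnce is a hypothetical operation that fails transiently a few times
// before succeeding.
func pushOnce(i int) error {
	if i < 3 {
		return errors.New("transient push failure")
	}
	return nil
}

func main() {
	backoff := &expBackoff{}
	for i := 0; ; i++ {
		if err := pushOnce(i); err != nil {
			wait := backoff.Delay() // consecutive failures wait 100ms, 200ms, 400ms, ...
			fmt.Printf("push failed (%v), retrying in %v\n", err, wait)
			time.Sleep(wait)
			continue
		}
		backoff.Reset() // success: a later failure starts again from the shortest delay
		fmt.Println("push succeeded")
		return
	}
}
```

This is the pattern the watch loop below applies through its retryTimer: processEvents hands back the next Delay() as the retry interval on error and calls Reset() after a successful push, so only consecutive failures lengthen the wait.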
-func (m *MockClient) WatchAndPush(out io.Writer, parameters WatchParameters, ctx context.Context) error { +func (m *MockClient) WatchAndPush(out io.Writer, parameters WatchParameters, ctx context.Context, componentStatus ComponentStatus) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WatchAndPush", out, parameters, ctx) + ret := m.ctrl.Call(m, "WatchAndPush", out, parameters, ctx, componentStatus) ret0, _ := ret[0].(error) return ret0 } // WatchAndPush indicates an expected call of WatchAndPush. -func (mr *MockClientMockRecorder) WatchAndPush(out, parameters, ctx interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) WatchAndPush(out, parameters, ctx, componentStatus interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchAndPush", reflect.TypeOf((*MockClient)(nil).WatchAndPush), out, parameters, ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchAndPush", reflect.TypeOf((*MockClient)(nil).WatchAndPush), out, parameters, ctx, componentStatus) } diff --git a/pkg/watch/pod_phases.go b/pkg/watch/pod_phases.go new file mode 100644 index 00000000000..361152fe8ab --- /dev/null +++ b/pkg/watch/pod_phases.go @@ -0,0 +1,73 @@ +package watch + +import ( + "io" + "sort" + "strings" + + "github.com/redhat-developer/odo/pkg/log" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type PodPhases map[metav1.Time]corev1.PodPhase + +func NewPodPhases() PodPhases { + return map[metav1.Time]corev1.PodPhase{} +} + +func (o *PodPhases) Add(out io.Writer, k metav1.Time, pod *corev1.Pod) { + v := pod.Status.Phase + if pod.GetDeletionTimestamp() != nil { + v = "Terminating" + } + display := false + if (*o)[k] != v { + display = true + } + (*o)[k] = v + if display { + o.Display(out) + } +} + +func (o *PodPhases) Delete(out io.Writer, pod *corev1.Pod) { + k := pod.GetCreationTimestamp() + if _, ok := (*o)[k]; ok { + delete(*o, k) + o.Display(out) + } +} + +func (o PodPhases) Display(out io.Writer) { + + if len(o) == 0 { + log.Fwarning(out, "No pod exists") + return + } + + keys := make([]metav1.Time, 0, len(o)) + for k := range o { + keys = append(keys, k) + } + + if len(keys) == 1 { + phase := o[keys[0]] + if phase == corev1.PodRunning { + log.Fsuccess(out, "Pod is "+phase) + return + } + log.Fwarning(out, "Pod is "+phase) + return + } + + sort.Slice(keys, func(i, j int) bool { + return keys[i].Before(&keys[j]) + }) + + values := make([]string, 0, len(o)) + for _, k := range keys { + values = append(values, string(o[k])) + } + log.Fwarning(out, "Pods are "+strings.Join(values, ", ")) +} diff --git a/pkg/watch/status.go b/pkg/watch/status.go new file mode 100644 index 00000000000..7fc12b6edc1 --- /dev/null +++ b/pkg/watch/status.go @@ -0,0 +1,24 @@ +package watch + +type State string + +const ( + StateWaitDeployment State = "WaitDeployment" + StateSyncOutdated State = "SyncOutdated" + //StateWaitBindings State = "WaitBindings" + //StatePodRunning State = "PodRunning" + //StateFilesSynced State = "FilesSynced" + //StateBuildCommandExecuted State = "BuildCommandExecuted" + //StateRunCommandRunning State = "RunCommandRunning" + StateReady State = "Ready" +) + +type ComponentStatus struct { + State State + PostStartEventsDone bool + EndpointsForwarded map[string][]int +} + +func componentCanSyncFile(state State) bool { + return state == StateReady +} diff --git a/pkg/watch/watch.go b/pkg/watch/watch.go index d8001e4ffb2..ac549232716 100644 --- a/pkg/watch/watch.go +++ b/pkg/watch/watch.go @@ -2,27 
+2,34 @@ package watch import ( "context" + "errors" "fmt" "io" "os" "path/filepath" + "reflect" "time" "github.com/devfile/library/pkg/devfile/parser" _delete "github.com/redhat-developer/odo/pkg/component/delete" "github.com/redhat-developer/odo/pkg/devfile/adapters" + "github.com/redhat-developer/odo/pkg/kclient" "github.com/redhat-developer/odo/pkg/labels" + "github.com/redhat-developer/odo/pkg/log" "github.com/redhat-developer/odo/pkg/state" "github.com/fsnotify/fsnotify" gitignore "github.com/sabhiram/go-gitignore" "github.com/redhat-developer/odo/pkg/envinfo" - "github.com/redhat-developer/odo/pkg/log" "github.com/redhat-developer/odo/pkg/util" dfutil "github.com/devfile/library/pkg/util" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" "k8s.io/klog" ) @@ -33,14 +40,16 @@ const ( ) type WatchClient struct { + kubeClient kclient.ClientInterface deleteClient _delete.Client stateClient state.Client } var _ Client = (*WatchClient)(nil) -func NewWatchClient(deleteClient _delete.Client, stateClient state.Client) *WatchClient { +func NewWatchClient(kubeClient kclient.ClientInterface, deleteClient _delete.Client, stateClient state.Client) *WatchClient { return &WatchClient{ + kubeClient: kubeClient, deleteClient: deleteClient, stateClient: stateClient, } @@ -52,6 +61,8 @@ type WatchParameters struct { ComponentName string // Name of application, the component is part of ApplicationName string + // DevfilePath is the path of the devfile + DevfilePath string // The path to the source of component(local or binary) Path string // List/Slice of files/folders in component source, the updates to which need not be pushed to component deployed pod @@ -59,7 +70,7 @@ type WatchParameters struct { // Custom function that can be used to push detected changes to remote pod. For more info about what each of the parameters to this function, please refer, pkg/component/component.go#PushLocal // WatchHandler func(kclient.ClientInterface, string, string, string, io.Writer, []string, []string, bool, []string, bool) error // Custom function that can be used to push detected changes to remote devfile pod. For more info about what each of the parameters to this function, please refer, pkg/devfile/adapters/interface.go#PlatformAdapter - DevfileWatchHandler func(adapters.PushParameters, WatchParameters) error + DevfileWatchHandler func(adapters.PushParameters, WatchParameters, *ComponentStatus) error // Parameter whether or not to show build logs Show bool // EnvSpecificInfo contains information of env.yaml file @@ -80,6 +91,8 @@ type WatchParameters struct { Variables map[string]string // RandomPorts is true to forward containers ports on local random ports RandomPorts bool + // WatchFiles indicates to watch for file changes and sync changes to the container + WatchFiles bool // ErrOut is a Writer to output forwarded port information ErrOut io.Writer } @@ -90,7 +103,8 @@ type WatchParameters struct { type evaluateChangesFunc func(events []fsnotify.Event, path string, fileIgnores []string, watcher *fsnotify.Watcher) (changedFiles, deletedPaths []string) // processEventsFunc processes the events received on the watcher. 
It uses the WatchParameters to trigger watch handler and writes to out -type processEventsFunc func(changedFiles, deletedPaths []string, parameters WatchParameters, out io.Writer) +// It returns a Duration after which it should be called again in case of error +type processEventsFunc func(changedFiles, deletedPaths []string, parameters WatchParameters, out io.Writer, componentStatus *ComponentStatus, backoff *ExpBackoff) (*time.Duration, error) // cleanupFunc deletes the component created using the devfileObj and writes any outputs to out type cleanupFunc func(devfileObj parser.DevfileObj, out io.Writer) error @@ -202,60 +216,227 @@ func addRecursiveWatch(watcher *fsnotify.Watcher, rootPath string, path string, return nil } -func (o *WatchClient) WatchAndPush(out io.Writer, parameters WatchParameters, ctx context.Context) error { +func (o *WatchClient) WatchAndPush(out io.Writer, parameters WatchParameters, ctx context.Context, componentStatus ComponentStatus) error { klog.V(4).Infof("starting WatchAndPush, path: %s, component: %s, ignores %s", parameters.Path, parameters.ComponentName, parameters.FileIgnores) - absIgnorePaths := dfutil.GetAbsGlobExps(parameters.Path, parameters.FileIgnores) + var sourcesWatcher *fsnotify.Watcher + var err error + if parameters.WatchFiles { + // TODO(feloy) ignore files included by Devfile? + sourcesWatcher, err = getFullSourcesWatcher(parameters.Path, parameters.FileIgnores) + if err != nil { + return err + } + } else { + sourcesWatcher, err = fsnotify.NewWatcher() + if err != nil { + return err + } + } + defer sourcesWatcher.Close() + + selector := labels.GetSelector(parameters.ComponentName, parameters.ApplicationName, labels.ComponentDevMode) + deploymentWatcher, err := o.kubeClient.DeploymentWatcher(ctx, selector) + if err != nil { + return fmt.Errorf("error watching deployment: %v", err) + } + + // TODO(feloy) watch files included by Devfile + devfileWatcher, err := fsnotify.NewWatcher() + if err != nil { + return err + } + if parameters.WatchFiles { + err = devfileWatcher.Add(parameters.DevfilePath) + if err != nil { + return err + } + } + + podWatcher, err := o.kubeClient.PodWatcher(ctx, selector) + if err != nil { + return err + } + + warningsWatcher, isForbidden, err := o.kubeClient.PodWarningEventWatcher(ctx) + if err != nil { + return err + } + if isForbidden { + log.Fwarning(out, "Unable to watch Events resource, warning Events won't be displayed") + } + + return o.eventWatcher(ctx, sourcesWatcher, deploymentWatcher, devfileWatcher, podWatcher, warningsWatcher, parameters, out, evaluateFileChanges, processEvents, o.CleanupDevResources, componentStatus) +} + +func getFullSourcesWatcher(path string, fileIgnores []string) (*fsnotify.Watcher, error) { + absIgnorePaths := dfutil.GetAbsGlobExps(path, fileIgnores) watcher, err := fsnotify.NewWatcher() if err != nil { - return fmt.Errorf("error setting up filesystem watcher: %v", err) + return nil, fmt.Errorf("error setting up filesystem watcher: %v", err) } - defer watcher.Close() // adding watch on the root folder and the sub folders recursively // so directory and the path in addRecursiveWatch() are the same - err = addRecursiveWatch(watcher, parameters.Path, parameters.Path, absIgnorePaths) + err = addRecursiveWatch(watcher, path, path, absIgnorePaths) if err != nil { - return fmt.Errorf("error watching source path %s: %v", parameters.Path, err) + return nil, fmt.Errorf("error watching source path %s: %v", path, err) } - - printInfoMessage(out, parameters.Path) - - return eventWatcher(ctx, watcher, parameters,
out, evaluateFileChanges, processEvents, o.CleanupDevResources) + return watcher, nil } // eventWatcher loops till the context's Done channel indicates it to stop looping, at which point it performs cleanup. // While looping, it listens for filesystem events and processes these events using the WatchParameters to push to the remote pod. // It outputs any logs to the out io Writer -func eventWatcher(ctx context.Context, watcher *fsnotify.Watcher, parameters WatchParameters, out io.Writer, evaluateChangesHandler evaluateChangesFunc, processEventsHandler processEventsFunc, cleanupHandler cleanupFunc) error { +func (o *WatchClient) eventWatcher(ctx context.Context, sourcesWatcher *fsnotify.Watcher, deploymentWatcher watch.Interface, devfileWatcher *fsnotify.Watcher, podWatcher watch.Interface, eventsWatcher watch.Interface, parameters WatchParameters, out io.Writer, evaluateChangesHandler evaluateChangesFunc, processEventsHandler processEventsFunc, cleanupHandler cleanupFunc, componentStatus ComponentStatus) error { + + expBackoff := NewExpBackoff() + var events []fsnotify.Event - // timer helps collect multiple events that happen in a quick succession. We start with 1ms as we don't care much - // at this point. In the select block, however, every time we receive an event, we reset the timer to watch for + // sourcesTimer helps collect multiple events that happen in a quick succession. We start with 1ms as we don't care much + // at this point. In the select block, however, every time we receive an event, we reset the sourcesTimer to watch for // 100ms since receiving that event. This is done because a single filesystem event by the user triggers multiple // events for fsnotify. It's a known-issue, but not really bug. For more info look at below issues: // - https://github.com/fsnotify/fsnotify/issues/122 // - https://github.com/fsnotify/fsnotify/issues/344 - timer := time.NewTimer(time.Millisecond) - <-timer.C + sourcesTimer := time.NewTimer(time.Millisecond) + <-sourcesTimer.C + + devfileTimer := time.NewTimer(time.Millisecond) + <-devfileTimer.C + + deployTimer := time.NewTimer(time.Millisecond) + <-deployTimer.C + + retryTimer := time.NewTimer(time.Millisecond) + <-retryTimer.C + + podsPhases := NewPodPhases() for { select { - case event := <-watcher.Events: + case event := <-sourcesWatcher.Events: events = append(events, event) // We are waiting for more events in this interval - timer.Reset(100 * time.Millisecond) - case <-timer.C: + sourcesTimer.Reset(100 * time.Millisecond) + case <-sourcesTimer.C: // timer has fired + if !componentCanSyncFile(componentStatus.State) { + continue + } // first find the files that have changed (also includes the ones newly created) or deleted - changedFiles, deletedPaths := evaluateChangesHandler(events, parameters.Path, parameters.FileIgnores, watcher) + changedFiles, deletedPaths := evaluateChangesHandler(events, parameters.Path, parameters.FileIgnores, sourcesWatcher) // process the changes and sync files with remote pod - processEventsHandler(changedFiles, deletedPaths, parameters, out) + if len(changedFiles) == 0 && len(deletedPaths) == 0 { + continue + } + componentStatus.State = StateSyncOutdated + fmt.Fprintf(out, "Pushing files...\n\n") + retry, err := processEventsHandler(changedFiles, deletedPaths, parameters, out, &componentStatus, expBackoff) + if err != nil { + return err + } // empty the events to receive new events - events = []fsnotify.Event{} // empty the events slice to capture new events - case watchErr := <-watcher.Errors: + if 
componentStatus.State == StateReady { + events = []fsnotify.Event{} // empty the events slice to capture new events + } + + if retry != nil { + retryTimer.Reset(*retry) + } else { + retryTimer.Reset(time.Millisecond) + <-retryTimer.C + } + + case watchErr := <-sourcesWatcher.Errors: return watchErr + + case ev := <-deploymentWatcher.ResultChan(): + switch obj := ev.Object.(type) { + case *appsv1.Deployment: + klog.V(4).Infof("deployment watcher Event: Type: %s, name: %s, rv: %s, pods: %d\n", + ev.Type, obj.GetName(), obj.GetResourceVersion(), obj.Status.ReadyReplicas) + deployTimer.Reset(300 * time.Millisecond) + + case *metav1.Status: + klog.V(4).Infof("Status: %+v\n", obj) + } + + case <-deployTimer.C: + retry, err := processEventsHandler(nil, nil, parameters, out, &componentStatus, expBackoff) + if err != nil { + return err + } + if retry != nil { + retryTimer.Reset(*retry) + } else { + retryTimer.Reset(time.Millisecond) + <-retryTimer.C + } + + case <-devfileWatcher.Events: + devfileTimer.Reset(100 * time.Millisecond) + + case <-devfileTimer.C: + fmt.Fprintf(out, "Updating Component...\n\n") + retry, err := processEventsHandler(nil, nil, parameters, out, &componentStatus, expBackoff) + if err != nil { + return err + } + if retry != nil { + retryTimer.Reset(*retry) + } else { + retryTimer.Reset(time.Millisecond) + <-retryTimer.C + } + + case <-retryTimer.C: + retry, err := processEventsHandler(nil, nil, parameters, out, &componentStatus, expBackoff) + if err != nil { + return err + } + if retry != nil { + retryTimer.Reset(*retry) + } else { + retryTimer.Reset(time.Millisecond) + <-retryTimer.C + } + + case ev := <-podWatcher.ResultChan(): + switch ev.Type { + case watch.Deleted: + pod, ok := ev.Object.(*corev1.Pod) + if !ok { + return errors.New("unable to decode watch event") + } + podsPhases.Delete(out, pod) + case watch.Added, watch.Modified: + pod, ok := ev.Object.(*corev1.Pod) + if !ok { + return errors.New("unable to decode watch event") + } + podsPhases.Add(out, pod.GetCreationTimestamp(), pod) + } + + case ev := <-eventsWatcher.ResultChan(): + switch kevent := ev.Object.(type) { + case *corev1.Event: + podName := kevent.InvolvedObject.Name + selector := labels.GetSelector(parameters.ComponentName, parameters.ApplicationName, labels.ComponentDevMode) + matching, err := o.kubeClient.IsPodNameMatchingSelector(ctx, podName, selector) + if err != nil { + return err + } + if matching { + log.Fwarning(out, kevent.Message) + } + } + + case watchErr := <-devfileWatcher.Errors: + return watchErr + case <-ctx.Done(): return cleanupHandler(parameters.InitialDevfileObj, out) } @@ -328,18 +509,19 @@ func evaluateFileChanges(events []fsnotify.Event, path string, fileIgnores []str return changedFiles, deletedPaths } -func processEvents(changedFiles, deletedPaths []string, parameters WatchParameters, out io.Writer) { - if len(changedFiles) == 0 && len(deletedPaths) == 0 { - return - } - +func processEvents( + changedFiles, deletedPaths []string, + parameters WatchParameters, + out io.Writer, + componentStatus *ComponentStatus, + backoff *ExpBackoff, +) (*time.Duration, error) { for _, file := range removeDuplicates(append(changedFiles, deletedPaths...)) { fmt.Fprintf(out, "\nFile %s changed\n", file) } var hasFirstSuccessfulPushOccurred bool - fmt.Fprintf(out, "Pushing files...\n\n") klog.V(4).Infof("Copying files %s to pod", changedFiles) pushParams := adapters.PushParameters{ @@ -358,15 +540,30 @@ func processEvents(changedFiles, deletedPaths []string, parameters WatchParamete RandomPorts: 
parameters.RandomPorts, ErrOut: parameters.ErrOut, } - err := parameters.DevfileWatchHandler(pushParams, parameters) + oldStatus := *componentStatus + err := parameters.DevfileWatchHandler(pushParams, parameters, componentStatus) if err != nil { - // Log and output, but intentionally not exiting on error here. - // We don't want to break watch when push failed, it might be fixed with the next change. + if isFatal(err) { + return nil, err + } klog.V(4).Infof("Error from Push: %v", err) - fmt.Fprintf(out, "%s - %s\n\n", PushErrorString, err.Error()) - } else { + if parameters.WatchFiles { + // Log and output, but intentionally not exiting on error here. + // We don't want to break watch when push failed, it might be fixed with the next change. + fmt.Fprintf(out, "%s - %s\n\n", PushErrorString, err.Error()) + } else { + return nil, err + } + wait := backoff.Delay() + return &wait, nil + } + backoff.Reset() + if oldStatus.State != StateReady && componentStatus.State == StateReady || + !reflect.DeepEqual(oldStatus.EndpointsForwarded, componentStatus.EndpointsForwarded) { + printInfoMessage(out, parameters.Path) } + return nil, nil } func (o *WatchClient) CleanupDevResources(devfileObj parser.DevfileObj, out io.Writer) error { @@ -436,3 +633,7 @@ func removeDuplicates(input []string) []string { func printInfoMessage(out io.Writer, path string) { log.Finfof(out, "\nWatching for changes in the current directory %s\n"+CtrlCMessage+"\n", path) } + +func isFatal(err error) bool { + return errors.As(err, &adapters.ErrPortForward{}) +} diff --git a/pkg/watch/watch_test.go b/pkg/watch/watch_test.go index b74073d758a..26f23f5f400 100644 --- a/pkg/watch/watch_test.go +++ b/pkg/watch/watch_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/devfile/library/pkg/devfile/parser" + "k8s.io/apimachinery/pkg/watch" "github.com/fsnotify/fsnotify" ) @@ -40,8 +41,9 @@ func evaluateChangesHandler(events []fsnotify.Event, path string, fileIgnores [] return changedFiles, deletedPaths } -func processEventsHandler(changedFiles, deletedPaths []string, _ WatchParameters, out io.Writer) { +func processEventsHandler(changedFiles, deletedPaths []string, _ WatchParameters, out io.Writer, componentStatus *ComponentStatus, backo *ExpBackoff) (*time.Duration, error) { fmt.Fprintf(out, "changedFiles %s deletedPaths %s\n", changedFiles, deletedPaths) + return nil, nil } func cleanupHandler(_ parser.DevfileObj, out io.Writer) error { @@ -49,6 +51,15 @@ func cleanupHandler(_ parser.DevfileObj, out io.Writer) error { return nil } +type fakeWatcher struct{} + +func (o fakeWatcher) Stop() { +} + +func (o fakeWatcher) ResultChan() <-chan watch.Event { + return make(chan watch.Event, 1) +} + func Test_eventWatcher(t *testing.T) { type args struct { parameters WatchParameters @@ -66,7 +77,7 @@ func Test_eventWatcher(t *testing.T) { args: args{ parameters: WatchParameters{}, }, - wantOut: "changedFiles [file1 file2] deletedPaths []\ncleanup done", + wantOut: "Pushing files...\n\nchangedFiles [file1 file2] deletedPaths []\ncleanup done", wantErr: false, watcherEvents: []fsnotify.Event{{Name: "file1", Op: fsnotify.Create}, {Name: "file2", Op: fsnotify.Write}}, watcherError: nil, @@ -86,7 +97,7 @@ func Test_eventWatcher(t *testing.T) { args: args{ parameters: WatchParameters{FileIgnores: []string{"file1"}}, }, - wantOut: "changedFiles [] deletedPaths [file1 file2]\ncleanup done", + wantOut: "Pushing files...\n\nchangedFiles [] deletedPaths [file1 file2]\ncleanup done", wantErr: false, watcherEvents: []fsnotify.Event{{Name: "file1", Op: 
fsnotify.Remove}, {Name: "file2", Op: fsnotify.Rename}}, watcherError: nil, @@ -105,6 +116,7 @@ func Test_eventWatcher(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { watcher, _ := fsnotify.NewWatcher() + fileWatcher, _ := fsnotify.NewWatcher() var cancel context.CancelFunc ctx, cancel := context.WithCancel(context.Background()) out := &bytes.Buffer{} @@ -121,7 +133,12 @@ func Test_eventWatcher(t *testing.T) { cancel() }() - err := eventWatcher(ctx, watcher, tt.args.parameters, out, evaluateChangesHandler, processEventsHandler, cleanupHandler) + componentStatus := ComponentStatus{ + State: StateReady, + } + + o := WatchClient{} + err := o.eventWatcher(ctx, watcher, fakeWatcher{}, fileWatcher, fakeWatcher{}, fakeWatcher{}, tt.args.parameters, out, evaluateChangesHandler, processEventsHandler, cleanupHandler, componentStatus) if (err != nil) != tt.wantErr { t.Errorf("eventWatcher() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/tests/examples/source/devfiles/nodejs/devfile-deploy-functional-pods.yaml b/tests/examples/source/devfiles/nodejs/devfile-deploy-functional-pods.yaml index 3656dec2c8a..ccb1524a656 100644 --- a/tests/examples/source/devfiles/nodejs/devfile-deploy-functional-pods.yaml +++ b/tests/examples/source/devfiles/nodejs/devfile-deploy-functional-pods.yaml @@ -83,6 +83,9 @@ components: containers: - name: main image: "{{CONTAINER_IMAGE}}" + readinessProbe: + httpGet: + port: 3000 - name: another-deployment kubernetes: inlined: | @@ -103,17 +106,20 @@ components: containers: - name: main image: "{{CONTAINER_IMAGE}}" + readinessProbe: + httpGet: + port: 3000 - name: innerloop-pod kubernetes: inlined: | apiVersion: v1 kind: Pod metadata: - name: myapp + name: myapp-inner spec: containers: - name: main - image: alpine + image: quay.io/quay/busybox command: ["/bin/sh"] args: [ "-c", "while true; do echo \"`date` - this is infinite while loop\"; sleep 5; done" ] - name: outerloop-pod @@ -122,10 +128,10 @@ components: apiVersion: v1 kind: Pod metadata: - name: myapp + name: myapp-outer spec: containers: - name: main - image: alpine + image: quay.io/quay/busybox command: ["/bin/sh"] args: [ "-c", "while true; do echo \"`date` - this is infinite while loop\"; sleep 5; done" ] \ No newline at end of file diff --git a/tests/examples/source/devfiles/nodejs/devfile-with-volume-components.yaml b/tests/examples/source/devfiles/nodejs/devfile-with-volume-components.yaml index 000f25f7ec1..b6ed4846a25 100644 --- a/tests/examples/source/devfiles/nodejs/devfile-with-volume-components.yaml +++ b/tests/examples/source/devfiles/nodejs/devfile-with-volume-components.yaml @@ -36,7 +36,7 @@ components: volume: {} - name: secondvol volume: - size: 3Gi + size: 200Mi commands: - id: devbuild exec: diff --git a/tests/examples/source/devfiles/nodejs/devfile-with-volumes.yaml b/tests/examples/source/devfiles/nodejs/devfile-with-volumes.yaml index da1476ea231..7fa113d97da 100644 --- a/tests/examples/source/devfiles/nodejs/devfile-with-volumes.yaml +++ b/tests/examples/source/devfiles/nodejs/devfile-with-volumes.yaml @@ -32,7 +32,7 @@ components: path: /data2 - name: myvol volume: - size: 3Gi + size: 200Mi - name: myvol2 volume: {} commands: diff --git a/tests/helper/helper_dev.go b/tests/helper/helper_dev.go index 0a8e9b1d57b..1110c03f3cc 100644 --- a/tests/helper/helper_dev.go +++ b/tests/helper/helper_dev.go @@ -160,7 +160,7 @@ func (o DevSession) WaitEnd() { // It returns the contents of the standard and error outputs // since the end of the dev mode started or 
the previous sync, and until the end of the synchronization. func (o DevSession) WaitSync() ([]byte, []byte, map[string]string, error) { - WaitForOutputToContain("Pushing files...", 180, 10, o.session) + WaitForOutputToContainOne([]string{"Pushing files...", "Updating Component..."}, 180, 10, o.session) WaitForOutputToContain("Watching for changes in the current directory", 240, 10, o.session) outContents := o.session.Out.Contents() errContents := o.session.Err.Contents() diff --git a/tests/helper/helper_kubectl.go b/tests/helper/helper_kubectl.go index 98018243ed3..0f7c7ab2855 100644 --- a/tests/helper/helper_kubectl.go +++ b/tests/helper/helper_kubectl.go @@ -79,7 +79,7 @@ func (kubectl KubectlRunner) CheckCmdOpInRemoteDevfilePod(podName string, contai -// devfile component by passing component name as a argument +// devfile component by passing component name as an argument func (kubectl KubectlRunner) GetRunningPodNameByComponent(compName string, namespace string) string { selector := fmt.Sprintf("--selector=component=%s", compName) - stdOut := Cmd(kubectl.path, "get", ResourceTypePod, "--namespace", namespace, selector, "-o", "jsonpath={.items[*].metadata.name}").ShouldPass().Out() + stdOut := Cmd(kubectl.path, "get", ResourceTypePod, "--namespace", namespace, "--field-selector=status.phase=Running", selector, "-o", "jsonpath={.items[*].metadata.name}").ShouldPass().Out() return strings.TrimSpace(stdOut) } @@ -93,7 +93,7 @@ func (kubectl KubectlRunner) GetPVCSize(compName, storageName, namespace string) // GetPodInitContainers executes kubectl command and returns the init containers of the pod func (kubectl KubectlRunner) GetPodInitContainers(compName string, namespace string) []string { selector := fmt.Sprintf("--selector=component=%s", compName) - stdOut := Cmd(kubectl.path, "get", ResourceTypePod, "--namespace", namespace, selector, "-o", "jsonpath={.items[*].spec.initContainers[*].name}").ShouldPass().Out() + stdOut := Cmd(kubectl.path, "get", ResourceTypePod, "--namespace", namespace, "--field-selector=status.phase=Running", selector, "-o", "jsonpath={.items[*].spec.initContainers[*].name}").ShouldPass().Out() return strings.Split(stdOut, " ") } diff --git a/tests/helper/helper_run.go b/tests/helper/helper_run.go index f86b8d9c931..945ba38ff7a 100644 --- a/tests/helper/helper_run.go +++ b/tests/helper/helper_run.go @@ -10,6 +10,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" + "github.com/onsi/gomega/types" "github.com/redhat-developer/odo/pkg/labels" ) @@ -40,6 +41,18 @@ func WaitForOutputToContain(substring string, timeoutInSeconds int, intervalInSe } +// WaitForOutputToContainOne waits for the session stdout output to contain at least one of the given substrings +func WaitForOutputToContainOne(substrings []string, timeoutInSeconds int, intervalInSeconds int, session *gexec.Session) { + + matchers := make([]types.GomegaMatcher, 0, len(substrings)) + for _, substring := range substrings { + matchers = append(matchers, ContainSubstring(substring)) + } + Eventually(func() string { + contents := string(session.Out.Contents()) + return contents + }, timeoutInSeconds, intervalInSeconds).Should(SatisfyAny(matchers...)) +} + -// WaitForErroutToContain waits for the session stdout output to contain a particular substring +// WaitForErroutToContain waits for the session stderr output to contain a particular substring func WaitForErroutToContain(substring string, timeoutInSeconds int, intervalInSeconds int, session *gexec.Session) { diff --git a/tests/integration/cmd_dev_debug_test.go b/tests/integration/cmd_dev_debug_test.go index ec56df2ccc1..e8c0dc9d485 100644 --- a/tests/integration/cmd_dev_debug_test.go +++ b/tests/integration/cmd_dev_debug_test.go @@ -97,7 +97,7 @@ var _ = Describe("odo dev debug command tests", func() { // Because of the Spinner, the "Building your application in container on cluster" is printed twice in the captured stdout. // The bracket allows to match the last occurrence with the command execution timing information. Expect(strings.Count(string(stdout), "Building your application in container on cluster (command: install) [")). - To(BeNumerically("==", 1)) + To(BeNumerically("==", 1), "\nOUTPUT: "+string(stdout)+"\n") }) By("verifying that the command did run successfully", func() { @@ -170,7 +170,7 @@ var _ = Describe("odo dev debug command tests", func() { out := string(stdout) for _, cmd := range []string{"mkdir", "sleep-cmd-build", "build-cmd"} { Expect(strings.Count(out, fmt.Sprintf("Building your application in container on cluster (command: %s) [", cmd))).
- To(BeNumerically("==", 1)) + To(BeNumerically("==", 1), "\nOUTPUT: "+string(stdout)+"\n") } }) diff --git a/tests/integration/cmd_dev_test.go b/tests/integration/cmd_dev_test.go index d57eb9ef531..af0c7b31597 100644 --- a/tests/integration/cmd_dev_test.go +++ b/tests/integration/cmd_dev_test.go @@ -185,31 +185,50 @@ var _ = Describe("odo dev command tests", func() { }) }) + When("recording telemetry data", func() { + BeforeEach(func() { + helper.EnableTelemetryDebug() + session, _, _, _, _ := helper.StartDevMode() + session.Stop() + session.WaitEnd() + }) + AfterEach(func() { + helper.ResetTelemetry() + }) + It("should record the telemetry data correctly", func() { + td := helper.GetTelemetryDebugData() + Expect(td.Event).To(ContainSubstring("odo dev")) + Expect(td.Properties.Success).To(BeTrue()) + Expect(td.Properties.Error).ToNot(ContainSubstring("interrupt")) + Expect(td.Properties.CmdProperties[segment.ComponentType]).To(ContainSubstring("nodejs")) + Expect(td.Properties.CmdProperties[segment.Language]).To(ContainSubstring("nodejs")) + Expect(td.Properties.CmdProperties[segment.ProjectType]).To(ContainSubstring("nodejs")) + }) + }) + When("odo dev is executed", func() { + var devSession helper.DevSession + BeforeEach(func() { - devSession, _, _, _, err := helper.StartDevMode() + var err error + devSession, _, _, _, err = helper.StartDevMode() Expect(err).ToNot(HaveOccurred()) - defer func() { - devSession.Kill() - devSession.WaitEnd() - }() // An ENV file should have been created indicating current namespace Expect(helper.VerifyFileExists(".odo/env/env.yaml")).To(BeTrue()) helper.FileShouldContainSubstring(".odo/env/env.yaml", "Project: "+commonVar.Project) }) + AfterEach(func() { + devSession.Kill() + devSession.WaitEnd() + }) + When("deleting previous deployment and switching kubeconfig to another namespace", func() { var otherNS string BeforeEach(func() { - helper.Cmd("odo", "delete", "component", "--name", cmpName, "-f").ShouldPass() - output := commonVar.CliRunner.Run("get", "deployment", "-n", commonVar.Project).Err.Contents() - Expect(string(output)).To(ContainSubstring("No resources found in " + commonVar.Project + " namespace.")) - - Eventually(func() string { - return string(commonVar.CliRunner.Run("get", "pods", "-n", commonVar.Project).Err.Contents()) - }, 180, 10).Should(ContainSubstring("No resources found")) - + devSession.Stop() + devSession.WaitEnd() otherNS = commonVar.CliRunner.CreateAndSetRandNamespaceProject() }) @@ -228,34 +247,14 @@ var _ = Describe("odo dev command tests", func() { Expect(err).ToNot(HaveOccurred()) }) }) - When("recording telemetry data", func() { + When("odo dev is stopped", func() { BeforeEach(func() { - helper.EnableTelemetryDebug() - session, _, _, _, _ := helper.StartDevMode() - session.Stop() - session.WaitEnd() - }) - AfterEach(func() { - helper.ResetTelemetry() - }) - It("should record the telemetry data correctly", func() { - td := helper.GetTelemetryDebugData() - Expect(td.Event).To(ContainSubstring("odo dev")) - Expect(td.Properties.Success).To(BeTrue()) - Expect(td.Properties.Error).ToNot(ContainSubstring("interrupt")) - Expect(td.Properties.CmdProperties[segment.ComponentType]).To(ContainSubstring("nodejs")) - Expect(td.Properties.CmdProperties[segment.Language]).To(ContainSubstring("nodejs")) - Expect(td.Properties.CmdProperties[segment.ProjectType]).To(ContainSubstring("nodejs")) + devSession.Stop() + devSession.WaitEnd() }) - }) - When("odo dev is stopped", func() { + It("should delete component from the cluster", func() { 
deploymentName := fmt.Sprintf("%s-%s", cmpName, "app") - err := helper.RunDevMode(nil, func(session *gexec.Session, outContents, errContents []byte, ports map[string]string) { - out := commonVar.CliRunner.Run("get", "deployment", "-n", commonVar.Project).Out.Contents() - Expect(string(out)).To(ContainSubstring(deploymentName)) - }) - Expect(err).ToNot(HaveOccurred()) errout := commonVar.CliRunner.Run("get", "deployment", "-n", commonVar.Project).Err.Contents() Expect(string(errout)).ToNot(ContainSubstring(deploymentName)) }) @@ -359,18 +358,23 @@ var _ = Describe("odo dev command tests", func() { }) When("odo is executed with --no-watch flag", func() { + + var devSession helper.DevSession + BeforeEach(func() { - devSession, _, _, _, err := helper.StartDevMode("--no-watch") + var err error + devSession, _, _, _, err = helper.StartDevMode("--no-watch") Expect(err).ToNot(HaveOccurred()) - defer func() { - devSession.Kill() - devSession.WaitEnd() - }() // An ENV file should have been created indicating current namespace Expect(helper.VerifyFileExists(".odo/env/env.yaml")).To(BeTrue()) helper.FileShouldContainSubstring(".odo/env/env.yaml", "Project: "+commonVar.Project) }) + AfterEach(func() { + devSession.Kill() + devSession.WaitEnd() + }) + When("a file in component directory is modified", func() { It("should not trigger a push", func() { helper.ReplaceString(filepath.Join(commonVar.Context, "server.js"), "App started", "App is super started") @@ -1143,7 +1147,7 @@ var _ = Describe("odo dev command tests", func() { // Verify the pvc size for secondvol storageSize = commonVar.CliRunner.GetPVCSize(devfileCmpName, "secondvol", commonVar.Project) // should be the specified size in the devfile volume component - Expect(storageSize).To(ContainSubstring("3Gi")) + Expect(storageSize).To(ContainSubstring("200Mi")) }) }) @@ -1283,7 +1287,7 @@ var _ = Describe("odo dev command tests", func() { // Because of the Spinner, the "Building your application in container on cluster" is printed twice in the captured stdout. // The bracket allows to match the last occurrence with the command execution timing information. Expect(strings.Count(string(stdout), "Building your application in container on cluster (command: install) [")). - To(BeNumerically("==", 1)) + To(BeNumerically("==", 1), "\nOUTPUT: "+string(stdout)+"\n") }) By("verifying that the command did run successfully", func() { @@ -1348,7 +1352,7 @@ var _ = Describe("odo dev command tests", func() { out := string(stdout) for _, cmd := range []string{"mkdir", "sleep-cmd-build", "build-cmd"} { Expect(strings.Count(out, fmt.Sprintf("Building your application in container on cluster (command: %s) [", cmd))). 
- To(BeNumerically("==", 1)) + To(BeNumerically("==", 1), "\nOUTPUT: "+string(stdout)+"\n") } }) @@ -1404,12 +1408,12 @@ var _ = Describe("odo dev command tests", func() { }) }) - When("running odo dev and build command throws an error", func() { + When("running odo dev --no-watch and build command throws an error", func() { var stderr string BeforeEach(func() { helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml")) helper.ReplaceString(filepath.Join(commonVar.Context, "devfile.yaml"), "npm install", "npm install-does-not-exist") - stderr = helper.Cmd("odo", "dev", "--random-ports").ShouldFail().Err() + stderr = helper.Cmd("odo", "dev", "--no-watch", "--random-ports").ShouldFail().Err() }) It("should error out with some log", func() { @@ -1689,7 +1693,7 @@ var _ = Describe("odo dev command tests", func() { When("Update the devfile.yaml", func() { BeforeEach(func() { - helper.ReplaceString("devfile.yaml", "memoryLimit: 1024Mi", "memoryLimit: 1023Mi") + helper.ReplaceString("devfile.yaml", "memoryLimit: 768Mi", "memoryLimit: 767Mi") var err error _, _, _, err = session.WaitSync() Expect(err).ToNot(HaveOccurred()) @@ -1889,14 +1893,20 @@ var _ = Describe("odo dev command tests", func() { // cf. https://github.com/redhat-developer/odo/blob/24fd02673d25eb4c7bb166ec3369554a8e64b59c/tests/integration/devfile/cmd_devfile_delete_test.go#L172-L238 When("a component with endpoints is bootstrapped and pushed", func() { + var devSession helper.DevSession + BeforeEach(func() { cmpName = "nodejs-with-endpoints" helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context) helper.Cmd("odo", "init", "--name", cmpName, "--devfile-path", helper.GetExamplePath("source", "devfiles", "nodejs", "devfile-with-multiple-endpoints.yaml")).ShouldPass() - devSession, _, _, _, err := helper.StartDevMode() + var err error + devSession, _, _, _, err = helper.StartDevMode() Expect(err).ShouldNot(HaveOccurred()) + }) + + AfterEach(func() { devSession.Kill() devSession.WaitEnd() }) diff --git a/tests/integration/cmd_logs_test.go b/tests/integration/cmd_logs_test.go index e950ff97743..26e44e9937a 100644 --- a/tests/integration/cmd_logs_test.go +++ b/tests/integration/cmd_logs_test.go @@ -78,9 +78,13 @@ var _ = Describe("odo logs command tests", func() { BeforeEach(func() { devSession, _, _, _, err = helper.StartDevMode() Expect(err).ToNot(HaveOccurred()) + // We need to wait for the pod deployed as a Kubernetes component + Eventually(func() bool { + return areAllPodsRunning() + }).Should(Equal(true)) }) AfterEach(func() { - devSession.Kill() + devSession.Stop() devSession.WaitEnd() }) It("should successfully show logs of the running component", func() { @@ -178,7 +182,7 @@ var _ = Describe("odo logs command tests", func() { }).Should(Equal(true)) }) AfterEach(func() { - devSession.Kill() + devSession.Stop() devSession.WaitEnd() }) It("should successfully show logs of the running component", func() {
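The logs tests above now block until every pod is Running before asserting on output, through an areAllPodsRunning helper defined elsewhere in the test file. The sketch below is a hypothetical, self-contained approximation of such a check using kubectl's JSONPath output; the body shown is an assumption for illustration, not the repository's actual helper.

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// areAllPodsRunning reports whether every pod in the given namespace is in
// the Running phase. A namespace with no pods is vacuously "all running",
// so callers that expect pods should also assert on the pod count.
func areAllPodsRunning(namespace string) (bool, error) {
	// Print only the phase of each pod, space-separated.
	out, err := exec.Command("kubectl", "get", "pods",
		"--namespace", namespace,
		"-o", "jsonpath={.items[*].status.phase}").Output()
	if err != nil {
		return false, err
	}
	for _, phase := range strings.Fields(string(out)) {
		if phase != "Running" {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	ok, err := areAllPodsRunning("default")
	fmt.Printf("all pods running: %v (err: %v)\n", ok, err)
}
```

Polled with Gomega's Eventually, as in the BeforeEach above, a check like this avoids the race where logs are requested before the pods created for extra Kubernetes components reach the Running phase.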