From 70aa5bbe643858e33c8962ff3b191971099f12ca Mon Sep 17 00:00:00 2001
From: Syulin7 <735122171@qq.com>
Date: Thu, 18 Jan 2024 15:29:56 +0800
Subject: [PATCH] chore: fix go lint.

Signed-off-by: Syulin7 <735122171@qq.com>
---
 Makefile | 8 +
 cmd/arena/main.go | 5 +-
 cmd/uninstall/uninstall.go | 45 +----
 pkg/apis/arenaclient/cron_client.go | 4 +-
 pkg/apis/arenaclient/serving_client.go | 2 +-
 pkg/apis/attach/attach.go | 2 +-
 pkg/apis/config/arenaconfig.go | 14 --
 pkg/apis/evaluate/evaluatejob_builder.go | 10 +-
 pkg/apis/model/benchmark_builder.go | 12 +-
 pkg/apis/model/evaluate_builder.go | 12 +-
 pkg/apis/model/optimize_builder.go | 12 +-
 pkg/apis/model/profile_builder.go | 12 +-
 pkg/apis/serving/custom_builder.go | 18 +-
 pkg/apis/serving/kserve_builder.go | 14 +-
 pkg/apis/serving/kubeflow_builder.go | 14 +-
 pkg/apis/serving/seldon_builder.go | 14 +-
 pkg/apis/serving/tensorflow_builder.go | 20 +-
 pkg/apis/serving/tensorrt_builder.go | 14 +-
 pkg/apis/serving/traffic_router_builder.go | 2 +-
 pkg/apis/serving/triton_builder.go | 20 +-
 pkg/apis/serving/update_custom_builder.go | 6 +-
 pkg/apis/serving/update_kserve_builder.go | 6 +-
 pkg/apis/serving/update_tensorflow_builder.go | 6 +-
 pkg/apis/serving/update_triton_builder.go | 6 +-
 pkg/apis/training/deepspeedjob_builder.go | 14 +-
 pkg/apis/training/etjob_builder.go | 14 +-
 pkg/apis/training/horovod_builder.go | 12 +-
 pkg/apis/training/mpijob_builder.go | 14 +-
 pkg/apis/training/pytorchjob_builder.go | 14 +-
 pkg/apis/training/sparkjob_builder.go | 4 +-
 pkg/apis/training/tfjob_builder.go | 24 +--
 pkg/apis/training/volcano_builder.go | 4 +-
 pkg/apis/types/gpunode.go | 2 +-
 pkg/apis/utils/pods.go | 17 +-
 pkg/apis/utils/utils.go | 26 +--
 pkg/argsbuilder/scale_etjob.go | 2 +-
 pkg/argsbuilder/serving.go | 15 +-
 pkg/argsbuilder/serving_kserve.go | 16 +-
 pkg/argsbuilder/serving_tensorflow.go | 36 +---
 pkg/argsbuilder/submit.go | 11 +-
 pkg/argsbuilder/submit_deepspeedjob.go | 6 +-
 pkg/argsbuilder/submit_etjob.go | 6 +-
 pkg/argsbuilder/submit_sparkjob.go | 2 +-
 pkg/argsbuilder/submit_sync_code.go | 6 +-
 pkg/argsbuilder/submit_tensorboard.go | 2 +-
 pkg/argsbuilder/submit_tfjob.go | 33 ++--
 pkg/argsbuilder/submit_volcanojob.go | 18 +-
 pkg/argsbuilder/traffic_router_builder.go | 9 +-
 pkg/argsbuilder/update_serving.go | 8 -
 pkg/argsbuilder/update_serving_tensorflow.go | 8 +-
 pkg/commands/cron/cron_tfjob.go | 2 +-
 pkg/commands/cron/delete.go | 2 +-
 pkg/commands/cron/get.go | 2 +-
 pkg/commands/cron/list.go | 2 +-
 pkg/commands/cron/resume.go | 2 +-
 pkg/commands/cron/suspend.go | 2 +-
 pkg/commands/data/list.go | 2 +-
 pkg/commands/evaluate/delete.go | 2 +-
 pkg/commands/evaluate/evaluate_model.go | 2 +-
 pkg/commands/evaluate/get.go | 2 +-
 pkg/commands/evaluate/list.go | 2 +-
 pkg/commands/model/delete.go | 2 +-
 pkg/commands/model/get.go | 2 +-
 pkg/commands/model/list.go | 2 +-
 pkg/commands/model/submit_benchmark.go | 2 +-
 pkg/commands/model/submit_evaluate.go | 2 +-
 pkg/commands/model/submit_optimize.go | 2 +-
 pkg/commands/model/submit_profile.go | 2 +-
 pkg/commands/serving/attach.go | 2 +-
 pkg/commands/serving/delete.go | 2 +-
 pkg/commands/serving/get.go | 9 +-
 pkg/commands/serving/list.go | 4 +-
 pkg/commands/serving/logs.go | 2 +-
 pkg/commands/serving/serving_custom.go | 2 +-
 pkg/commands/serving/serving_kserve.go | 2 +-
 pkg/commands/serving/serving_kubeflow.go | 2 +-
 pkg/commands/serving/serving_seldon.go | 2 +-
 pkg/commands/serving/serving_tensorflow.go | 2 +-
 pkg/commands/serving/serving_tensorrt.go | 2 +-
 pkg/commands/serving/serving_triton.go | 2 +-
 pkg/commands/serving/traffic_router_split.go | 2 +-
 pkg/commands/serving/update_custom.go | 2 +-
 pkg/commands/serving/update_kserve.go | 2 +-
 pkg/commands/serving/update_tensorflow.go | 2 +-
 pkg/commands/serving/update_triton.go | 2 +-
 pkg/commands/top/job.go | 4 +-
 pkg/commands/top/node.go | 4 +-
 pkg/commands/training/attach.go | 2 +-
 pkg/commands/training/delete.go | 11 +-
 pkg/commands/training/get.go | 15 +-
 pkg/commands/training/list.go | 6 +-
 pkg/commands/training/logs.go | 2 +-
 pkg/commands/training/logviewer.go | 2 +-
 pkg/commands/training/prune.go | 2 +-
 pkg/commands/training/scalein_etjob.go | 2 +-
 pkg/commands/training/scaleout_etjob.go | 2 +-
 pkg/commands/training/submit_deepspeedjob.go | 2 +-
 pkg/commands/training/submit_etjob.go | 2 +-
 pkg/commands/training/submit_horovodjob.go | 2 +-
 pkg/commands/training/submit_mpijob.go | 2 +-
 pkg/commands/training/submit_pytorchjob.go | 2 +-
 pkg/commands/training/submit_sparkjob.go | 2 +-
 pkg/commands/training/submit_tfjob.go | 2 +-
 pkg/commands/training/submit_volcanojob.go | 2 +-
 pkg/commands/version.go | 2 +-
 pkg/commands/whoami.go | 2 +-
 pkg/cron/util.go | 11 --
 pkg/datahouse/list.go | 2 +-
 pkg/evaluate/util.go | 4 -
 pkg/k8saccesser/k8s_accesser.go | 27 +--
 pkg/model/get.go | 2 +-
 pkg/model/model.go | 4 +-
 .../apis/tensorflow/v1alpha1/defaults.go | 2 +-
 .../apis/tensorflow/v1alpha2/defaults.go | 2 +-
 .../tensorflow/validation/validation_test.go | 6 +-
 .../clientset/versioned/scheme/register.go | 13 +-
 pkg/podexec/exec.go | 5 +-
 pkg/podlogs/logger.go | 8 +-
 pkg/prometheus/prom.go | 14 --
 pkg/prometheus/query.go | 15 --
 pkg/serving/get.go | 4 +-
 pkg/serving/list.go | 11 +-
 pkg/serving/serving.go | 9 +-
 pkg/serving/serving_kserve.go | 2 +-
 pkg/serving/serving_seldon.go | 4 +-
 pkg/serving/traffic_router_split.go | 14 +-
 pkg/serving/update.go | 39 +---
 pkg/serving/util.go | 6 +-
 pkg/topnode/gpuexclusive.go | 52 +----
 pkg/topnode/gpushare.go | 44 +----
 pkg/topnode/gputopology.go | 87 +--------
 pkg/topnode/normal.go | 5 +-
 pkg/training/const.go | 26 +--
 pkg/training/dashboard_helper.go | 35 ++--
 pkg/training/get.go | 32 ++--
 pkg/training/get_advanced.go | 12 +-
 pkg/training/gpu.go | 48 +----
 pkg/training/list.go | 4 +-
 pkg/training/logs.go | 12 +-
 pkg/training/pod_helper.go | 181 ------------------
 pkg/training/prune.go | 7 +-
 pkg/training/top_job.go | 8 +-
 pkg/training/trainer_mpi.go | 19 --
 pkg/training/trainer_pytorch.go | 25 ---
 pkg/training/trainer_spark.go | 20 --
 pkg/training/trainer_tensorflow.go | 29 ---
 pkg/training/trainer_volcano.go | 17 --
 pkg/util/charts.go | 5 +-
 pkg/util/config/loader.go | 2 -
 pkg/util/duration.go | 4 +-
 pkg/util/kubectl/kubectl.go | 5 +-
 pkg/util/port_allocate_test.go | 3 +
 pkg/util/resource.go | 4 +-
 pkg/util/retry.go | 2 +-
 pkg/util/validate.go | 8 -
 samples/sdk/custom-serving/main.go | 4 +
 samples/sdk/etjob/etjob.go | 4 +
 samples/sdk/mpijob/mpijob.go | 4 +
 158 files changed, 465 insertions(+), 1199 deletions(-)

diff --git a/Makefile b/Makefile
index 18bebe9d1..852f312b8 100644
--- a/Makefile
+++ b/Makefile
@@ -132,5 +132,13 @@ fmt: ## Run go fmt against code.
 vet: ## Run go vet against code.
 	go vet ./...
 
+GOLANGCI_LINT=$(shell which golangci-lint)
+golangci-lint:
+ifeq ($(GOLANGCI_LINT),)
+	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.53.3
+	$(info golangci-lint has been installed)
+endif
+	golangci-lint run --timeout 5m --go 1.18 ./...
+
 build-dependabot:
 	python3 hack/create_dependabot.py

diff --git a/cmd/arena/main.go b/cmd/arena/main.go
index 993a3fee0..a2a317ca4 100644
--- a/cmd/arena/main.go
+++ b/cmd/arena/main.go
@@ -37,7 +37,10 @@ func main() {
 		defer cpuf.Close()
 		runtime.SetCPUProfileRate(getProfileHZ())
-		pprof.StartCPUProfile(cpuf)
+		err = pprof.StartCPUProfile(cpuf)
+		if err != nil {
+			log.Fatal(err)
+		}
 		log.Infof("Dump cpu profile file into /tmp/cpu_profile")
 		defer pprof.StopCPUProfile()
 	}

diff --git a/cmd/uninstall/uninstall.go b/cmd/uninstall/uninstall.go
index 3482fed6d..b6e483c3a 100644
--- a/cmd/uninstall/uninstall.go
+++ b/cmd/uninstall/uninstall.go
@@ -69,8 +69,9 @@ func deleteArenaArtifacts(force bool) error {
 			return err
 		}
 	}
-	execCommand([]string{"arena-helm", "del", "arena-artifacts", "-n", *arenaNamespace})
-	stdout, stderr, err = execCommand([]string{"arena-kubectl", "delete", "-f", tmpFile})
+	stdout, stderr, _ = execCommand([]string{"arena-helm", "del", "arena-artifacts", "-n", *arenaNamespace})
+	fmt.Printf("%v,%v\n", stdout, stderr)
+	stdout, stderr, _ = execCommand([]string{"arena-kubectl", "delete", "-f", tmpFile})
+	fmt.Printf("%v,%v\n", stdout, stderr)
 	stdout, stderr, err = execCommand([]string{"arena-kubectl", "delete", "ns", *arenaNamespace})
 	if err != nil {
@@ -232,46 +233,6 @@ func getAllCRDsInK8s() ([]string, error) {
 	return crds, nil
 }
 
-func deleteClientFiles() {
-	execCommand([]string{"rm", "-rf", "/charts"})
-	execCommand([]string{"rm", "-rf", "~/charts"})
-	execCommand([]string{"rm", "-rf", "/usr/local/bin/arena"})
-	execCommand([]string{"rm", "-rf", "/usr/local/bin/arena-kubectl"})
-	execCommand([]string{"rm", "-rf", "/usr/local/bin/arena-helm"})
-	if err := removeLines([]string{"source <(arena completion bash)"}); err != nil {
-		fmt.Printf("Error: failed to remove line 'source <(arena completion bash)' from ~/bashrc or ~/.zshrc\n")
-		os.Exit(4)
-	}
-}
-
-func removeLines(lines []string) error {
-	homeDir := os.Getenv("HOME")
-	bashFile := path.Join(homeDir, ".bashrc")
-	zshFile := path.Join(homeDir, ".zshrc")
-	updateFile := func(f string) error {
-		contentBytes, err := ioutil.ReadFile(f)
-		if err != nil {
-			return err
-		}
-		content := string(contentBytes)
-		for _, line := range lines {
-			content = strings.ReplaceAll(content, line, "")
-		}
-		return ioutil.WriteFile(f, []byte(content), 0744)
-	}
-	if CheckFileExist(zshFile) {
-		if err := updateFile(zshFile); err != nil {
-			return err
-		}
-	}
-	if CheckFileExist(bashFile) {
-		if err := updateFile(bashFile); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func execCommand(args []string) (string, string, error) {
 	var stdout bytes.Buffer
 	var stderr bytes.Buffer

diff --git a/pkg/apis/arenaclient/cron_client.go b/pkg/apis/arenaclient/cron_client.go
index f1fee89f0..aa8c5f757 100644
--- a/pkg/apis/arenaclient/cron_client.go
+++ b/pkg/apis/arenaclient/cron_client.go
@@ -96,7 +96,9 @@ func (c *CronClient) Delete(names ...string) error {
 			continue
 		}
-		cron.DeleteCron(name, c.namespace, cronInfo.Type)
+		if err := cron.DeleteCron(name, c.namespace, cronInfo.Type); err != nil {
+			return err
+		}
 	}
 	return nil

diff --git a/pkg/apis/arenaclient/serving_client.go b/pkg/apis/arenaclient/serving_client.go
index 2d222c225..8a9bfcf23 100644
--- a/pkg/apis/arenaclient/serving_client.go
+++ b/pkg/apis/arenaclient/serving_client.go
@@ -185,7 +185,7 @@ func (t *ServingJobClient) TrafficRouterSplit(args *types.TrafficRouterSplitArgs
 func moreThanOneInstanceHelpInfo(instances []types.ServingInstance) string {
 	header := fmt.Sprintf("There is %d instances
have been found:", len(instances)) lines := []string{} - footer := fmt.Sprintf("please use '-i' or '--instance' to filter.") + footer := "please use '-i' or '--instance' to filter." for _, i := range instances { lines = append(lines, fmt.Sprintf("%v", i.Name)) } diff --git a/pkg/apis/attach/attach.go b/pkg/apis/attach/attach.go index 72f45ce98..822a8f2cb 100644 --- a/pkg/apis/attach/attach.go +++ b/pkg/apis/attach/attach.go @@ -50,7 +50,7 @@ func (a *AttachBuilder) IOStreams(stream genericclioptions.IOStreams) *AttachBui } func (a *AttachBuilder) Command(command []string) *AttachBuilder { - if command != nil && len(command) != 0 { + if len(command) != 0 { a.args.Command = command } return a diff --git a/pkg/apis/config/arenaconfig.go b/pkg/apis/config/arenaconfig.go index e3d58a030..2818d6622 100644 --- a/pkg/apis/config/arenaconfig.go +++ b/pkg/apis/config/arenaconfig.go @@ -303,17 +303,3 @@ func isolateUserInNamespace(namespaceName string, clientSet *kubernetes.Clientse } return namespace.Labels[types.MultiTenantIsolationLabel] == "true", nil } - -func getClusterInstalledCRDs(client *extclientset.Clientset) ([]string, error) { - selectorListOpts := metav1.ListOptions{} - - list, err := client.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), selectorListOpts) - if err != nil { - return nil, err - } - crds := []string{} - for _, crd := range list.Items { - crds = append(crds, crd.Name) - } - return crds, nil -} diff --git a/pkg/apis/evaluate/evaluatejob_builder.go b/pkg/apis/evaluate/evaluatejob_builder.go index 67407a81d..d0c19cdba 100644 --- a/pkg/apis/evaluate/evaluatejob_builder.go +++ b/pkg/apis/evaluate/evaluatejob_builder.go @@ -55,7 +55,7 @@ func (e *EvaluateJobBuilder) WorkingDir(dir string) *EvaluateJobBuilder { // Envs is used to set env of job containers,match option --env func (e *EvaluateJobBuilder) Envs(envs map[string]string) *EvaluateJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -81,7 +81,7 @@ func (e *EvaluateJobBuilder) Tolerations(tolerations []string) *EvaluateJobBuild // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (e *EvaluateJobBuilder) NodeSelectors(selectors map[string]string) *EvaluateJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -93,7 +93,7 @@ func (e *EvaluateJobBuilder) NodeSelectors(selectors map[string]string) *Evaluat // Annotations is used to add annotations for job pods,match option --annotation func (e *EvaluateJobBuilder) Annotations(annotations map[string]string) *EvaluateJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -105,7 +105,7 @@ func (e *EvaluateJobBuilder) Annotations(annotations map[string]string) *Evaluat // DataDirs is used to mount host files to job containers,match option --data-dir func (e *EvaluateJobBuilder) DataDirs(volumes map[string]string) *EvaluateJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -117,7 +117,7 @@ func (e *EvaluateJobBuilder) DataDirs(volumes 
map[string]string) *EvaluateJobBui // Datas is used to mount host files to job containers,match option --data func (e *EvaluateJobBuilder) Datas(volumes map[string]string) *EvaluateJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/model/benchmark_builder.go b/pkg/apis/model/benchmark_builder.go index 44941b15a..ad0c1d21f 100644 --- a/pkg/apis/model/benchmark_builder.go +++ b/pkg/apis/model/benchmark_builder.go @@ -123,7 +123,7 @@ func (m *ModelBenchmarkArgsBuilder) Memory(memory string) *ModelBenchmarkArgsBui // Envs is used to set env of job containers,match option --env func (m *ModelBenchmarkArgsBuilder) Envs(envs map[string]string) *ModelBenchmarkArgsBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -141,7 +141,7 @@ func (m *ModelBenchmarkArgsBuilder) Tolerations(tolerations []string) *ModelBenc // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (m *ModelBenchmarkArgsBuilder) NodeSelectors(selectors map[string]string) *ModelBenchmarkArgsBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -153,7 +153,7 @@ func (m *ModelBenchmarkArgsBuilder) NodeSelectors(selectors map[string]string) * // Annotations is used to add annotations for job pods,match option --annotation func (m *ModelBenchmarkArgsBuilder) Annotations(annotations map[string]string) *ModelBenchmarkArgsBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -165,7 +165,7 @@ func (m *ModelBenchmarkArgsBuilder) Annotations(annotations map[string]string) * // Labels is used to add labels for job func (m *ModelBenchmarkArgsBuilder) Labels(labels map[string]string) *ModelBenchmarkArgsBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -177,7 +177,7 @@ func (m *ModelBenchmarkArgsBuilder) Labels(labels map[string]string) *ModelBench // Datas is used to mount k8s pvc to job pods,match option --data func (m *ModelBenchmarkArgsBuilder) Datas(volumes map[string]string) *ModelBenchmarkArgsBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -189,7 +189,7 @@ func (m *ModelBenchmarkArgsBuilder) Datas(volumes map[string]string) *ModelBench // DataDirs is used to mount host files to job containers,match option --data-dir func (m *ModelBenchmarkArgsBuilder) DataDirs(volumes map[string]string) *ModelBenchmarkArgsBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/model/evaluate_builder.go b/pkg/apis/model/evaluate_builder.go index 8f553258b..6a56b332f 100644 --- a/pkg/apis/model/evaluate_builder.go +++ b/pkg/apis/model/evaluate_builder.go @@ -123,7 +123,7 @@ func (m *ModelEvaluateJobBuilder) Memory(memory string) 
*ModelEvaluateJobBuilder // Envs is used to set env of job containers,match option --env func (m *ModelEvaluateJobBuilder) Envs(envs map[string]string) *ModelEvaluateJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -141,7 +141,7 @@ func (m *ModelEvaluateJobBuilder) Tolerations(tolerations []string) *ModelEvalua // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (m *ModelEvaluateJobBuilder) NodeSelectors(selectors map[string]string) *ModelEvaluateJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -153,7 +153,7 @@ func (m *ModelEvaluateJobBuilder) NodeSelectors(selectors map[string]string) *Mo // Annotations is used to add annotations for job pods,match option --annotation func (m *ModelEvaluateJobBuilder) Annotations(annotations map[string]string) *ModelEvaluateJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -165,7 +165,7 @@ func (m *ModelEvaluateJobBuilder) Annotations(annotations map[string]string) *Mo // Labels is used to add labels for job func (m *ModelEvaluateJobBuilder) Labels(labels map[string]string) *ModelEvaluateJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -177,7 +177,7 @@ func (m *ModelEvaluateJobBuilder) Labels(labels map[string]string) *ModelEvaluat // Datas is used to mount k8s pvc to job pods,match option --data func (m *ModelEvaluateJobBuilder) Datas(volumes map[string]string) *ModelEvaluateJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -189,7 +189,7 @@ func (m *ModelEvaluateJobBuilder) Datas(volumes map[string]string) *ModelEvaluat // DataDirs is used to mount host files to job containers,match option --data-dir func (m *ModelEvaluateJobBuilder) DataDirs(volumes map[string]string) *ModelEvaluateJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/model/optimize_builder.go b/pkg/apis/model/optimize_builder.go index 5c0d236b7..0c726c72a 100644 --- a/pkg/apis/model/optimize_builder.go +++ b/pkg/apis/model/optimize_builder.go @@ -123,7 +123,7 @@ func (m *ModelOptimizeJobBuilder) Memory(memory string) *ModelOptimizeJobBuilder // Envs is used to set env of job containers,match option --env func (m *ModelOptimizeJobBuilder) Envs(envs map[string]string) *ModelOptimizeJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -141,7 +141,7 @@ func (m *ModelOptimizeJobBuilder) Tolerations(tolerations []string) *ModelOptimi // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (m *ModelOptimizeJobBuilder) NodeSelectors(selectors map[string]string) *ModelOptimizeJobBuilder { - if selectors != nil && 
len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -153,7 +153,7 @@ func (m *ModelOptimizeJobBuilder) NodeSelectors(selectors map[string]string) *Mo // Annotations is used to add annotations for job pods,match option --annotation func (m *ModelOptimizeJobBuilder) Annotations(annotations map[string]string) *ModelOptimizeJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -165,7 +165,7 @@ func (m *ModelOptimizeJobBuilder) Annotations(annotations map[string]string) *Mo // Labels is used to add labels for job func (m *ModelOptimizeJobBuilder) Labels(labels map[string]string) *ModelOptimizeJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -177,7 +177,7 @@ func (m *ModelOptimizeJobBuilder) Labels(labels map[string]string) *ModelOptimiz // Datas is used to mount k8s pvc to job pods,match option --data func (m *ModelOptimizeJobBuilder) Datas(volumes map[string]string) *ModelOptimizeJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -189,7 +189,7 @@ func (m *ModelOptimizeJobBuilder) Datas(volumes map[string]string) *ModelOptimiz // DataDirs is used to mount host files to job containers,match option --data-dir func (m *ModelOptimizeJobBuilder) DataDirs(volumes map[string]string) *ModelOptimizeJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/model/profile_builder.go b/pkg/apis/model/profile_builder.go index ca823fec6..d4a62d6be 100644 --- a/pkg/apis/model/profile_builder.go +++ b/pkg/apis/model/profile_builder.go @@ -123,7 +123,7 @@ func (m *ModelProfileJobBuilder) Memory(memory string) *ModelProfileJobBuilder { // Envs is used to set env of job containers,match option --env func (m *ModelProfileJobBuilder) Envs(envs map[string]string) *ModelProfileJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -141,7 +141,7 @@ func (m *ModelProfileJobBuilder) Tolerations(tolerations []string) *ModelProfile // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (m *ModelProfileJobBuilder) NodeSelectors(selectors map[string]string) *ModelProfileJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -153,7 +153,7 @@ func (m *ModelProfileJobBuilder) NodeSelectors(selectors map[string]string) *Mod // Annotations is used to add annotations for job pods,match option --annotation func (m *ModelProfileJobBuilder) Annotations(annotations map[string]string) *ModelProfileJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -165,7 +165,7 @@ func (m 
*ModelProfileJobBuilder) Annotations(annotations map[string]string) *Mod // Labels is used to add labels for job func (m *ModelProfileJobBuilder) Labels(labels map[string]string) *ModelProfileJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -177,7 +177,7 @@ func (m *ModelProfileJobBuilder) Labels(labels map[string]string) *ModelProfileJ // Datas is used to mount k8s pvc to job pods,match option --data func (m *ModelProfileJobBuilder) Datas(volumes map[string]string) *ModelProfileJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -189,7 +189,7 @@ func (m *ModelProfileJobBuilder) Datas(volumes map[string]string) *ModelProfileJ // DataDirs is used to mount host files to job containers,match option --data-dir func (m *ModelProfileJobBuilder) DataDirs(volumes map[string]string) *ModelProfileJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/serving/custom_builder.go b/pkg/apis/serving/custom_builder.go index 6576816f7..b76726370 100644 --- a/pkg/apis/serving/custom_builder.go +++ b/pkg/apis/serving/custom_builder.go @@ -117,7 +117,7 @@ func (b *CustomServingJobBuilder) Memory(memory string) *CustomServingJobBuilder // Envs is used to set env of job containers,match option --env func (b *CustomServingJobBuilder) Envs(envs map[string]string) *CustomServingJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -163,7 +163,7 @@ func (b *CustomServingJobBuilder) Tolerations(tolerations []string) *CustomServi // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *CustomServingJobBuilder) NodeSelectors(selectors map[string]string) *CustomServingJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -175,7 +175,7 @@ func (b *CustomServingJobBuilder) NodeSelectors(selectors map[string]string) *Cu // Annotations is used to add annotations for job pods,match option --annotation func (b *CustomServingJobBuilder) Annotations(annotations map[string]string) *CustomServingJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -187,7 +187,7 @@ func (b *CustomServingJobBuilder) Annotations(annotations map[string]string) *Cu // Labels is used to add labels for job func (b *CustomServingJobBuilder) Labels(labels map[string]string) *CustomServingJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -199,7 +199,7 @@ func (b *CustomServingJobBuilder) Labels(labels map[string]string) *CustomServin // Datas is used to mount k8s pvc to job pods,match option --data func (b *CustomServingJobBuilder) Datas(volumes map[string]string) *CustomServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if 
len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -211,7 +211,7 @@ func (b *CustomServingJobBuilder) Datas(volumes map[string]string) *CustomServin // DataSubPathExprs is used to mount k8s pvc subpath to job pods,match option data-subpath-expr func (b *CustomServingJobBuilder) DataSubPathExprs(exprs map[string]string) *CustomServingJobBuilder { - if exprs != nil && len(exprs) != 0 { + if len(exprs) != 0 { s := []string{} for key, value := range exprs { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -222,7 +222,7 @@ func (b *CustomServingJobBuilder) DataSubPathExprs(exprs map[string]string) *Cus } func (b *CustomServingJobBuilder) TempDirs(volumes map[string]string) *CustomServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -233,7 +233,7 @@ func (b *CustomServingJobBuilder) TempDirs(volumes map[string]string) *CustomSer } func (b *CustomServingJobBuilder) EmptyDirSubPathExprs(exprs map[string]string) *CustomServingJobBuilder { - if exprs != nil && len(exprs) != 0 { + if len(exprs) != 0 { s := []string{} for key, value := range exprs { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -245,7 +245,7 @@ func (b *CustomServingJobBuilder) EmptyDirSubPathExprs(exprs map[string]string) // DataDirs is used to mount host files to job containers,match option --data-dir func (b *CustomServingJobBuilder) DataDirs(volumes map[string]string) *CustomServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/serving/kserve_builder.go b/pkg/apis/serving/kserve_builder.go index a62b91d22..ac3f8e37c 100644 --- a/pkg/apis/serving/kserve_builder.go +++ b/pkg/apis/serving/kserve_builder.go @@ -118,7 +118,7 @@ func (b *KServeJobBuilder) Memory(memory string) *KServeJobBuilder { // Envs is used to set env of job containers,match option --env func (b *KServeJobBuilder) Envs(envs map[string]string) *KServeJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -164,7 +164,7 @@ func (b *KServeJobBuilder) Tolerations(tolerations []string) *KServeJobBuilder { // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *KServeJobBuilder) NodeSelectors(selectors map[string]string) *KServeJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -176,7 +176,7 @@ func (b *KServeJobBuilder) NodeSelectors(selectors map[string]string) *KServeJob // Annotations is used to add annotations for job pods,match option --annotation func (b *KServeJobBuilder) Annotations(annotations map[string]string) *KServeJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -188,7 +188,7 @@ func (b *KServeJobBuilder) Annotations(annotations map[string]string) *KServeJob // Labels is used to add labels for job func (b *KServeJobBuilder) Labels(labels map[string]string) *KServeJobBuilder { - if labels != nil && 
len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -200,7 +200,7 @@ func (b *KServeJobBuilder) Labels(labels map[string]string) *KServeJobBuilder { // Datas is used to mount k8s pvc to job pods,match option --data func (b *KServeJobBuilder) Datas(volumes map[string]string) *KServeJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -212,7 +212,7 @@ func (b *KServeJobBuilder) Datas(volumes map[string]string) *KServeJobBuilder { // DataDirs is used to mount host files to job containers,match option --data-dir func (b *KServeJobBuilder) DataDirs(volumes map[string]string) *KServeJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -224,7 +224,7 @@ func (b *KServeJobBuilder) DataDirs(volumes map[string]string) *KServeJobBuilder // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *KServeJobBuilder) ConfigFiles(files map[string]string) *KServeJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) diff --git a/pkg/apis/serving/kubeflow_builder.go b/pkg/apis/serving/kubeflow_builder.go index f9722a4f2..4de48c432 100644 --- a/pkg/apis/serving/kubeflow_builder.go +++ b/pkg/apis/serving/kubeflow_builder.go @@ -119,7 +119,7 @@ func (b *KFServingJobBuilder) Memory(memory string) *KFServingJobBuilder { // Envs is used to set env of job containers,match option --env func (b *KFServingJobBuilder) Envs(envs map[string]string) *KFServingJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -165,7 +165,7 @@ func (b *KFServingJobBuilder) Tolerations(tolerations []string) *KFServingJobBui // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *KFServingJobBuilder) NodeSelectors(selectors map[string]string) *KFServingJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -177,7 +177,7 @@ func (b *KFServingJobBuilder) NodeSelectors(selectors map[string]string) *KFServ // Annotations is used to add annotations for job pods,match option --annotation func (b *KFServingJobBuilder) Annotations(annotations map[string]string) *KFServingJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -189,7 +189,7 @@ func (b *KFServingJobBuilder) Annotations(annotations map[string]string) *KFServ // Labels is used to add labels for job func (b *KFServingJobBuilder) Labels(labels map[string]string) *KFServingJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -201,7 +201,7 @@ func (b *KFServingJobBuilder) Labels(labels map[string]string) 
*KFServingJobBuil // Datas is used to mount k8s pvc to job pods,match option --data func (b *KFServingJobBuilder) Datas(volumes map[string]string) *KFServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -213,7 +213,7 @@ func (b *KFServingJobBuilder) Datas(volumes map[string]string) *KFServingJobBuil // DataDirs is used to mount host files to job containers,match option --data-dir func (b *KFServingJobBuilder) DataDirs(volumes map[string]string) *KFServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -257,7 +257,7 @@ func (b *KFServingJobBuilder) StorageUri(uri string) *KFServingJobBuilder { // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *KFServingJobBuilder) ConfigFiles(files map[string]string) *KFServingJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) diff --git a/pkg/apis/serving/seldon_builder.go b/pkg/apis/serving/seldon_builder.go index e10b64e41..5af66b06e 100644 --- a/pkg/apis/serving/seldon_builder.go +++ b/pkg/apis/serving/seldon_builder.go @@ -119,7 +119,7 @@ func (b *SeldonJobBuilder) Memory(memory string) *SeldonJobBuilder { // Envs is used to set env of job containers,match option --env func (b *SeldonJobBuilder) Envs(envs map[string]string) *SeldonJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -165,7 +165,7 @@ func (b *SeldonJobBuilder) Tolerations(tolerations []string) *SeldonJobBuilder { // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *SeldonJobBuilder) NodeSelectors(selectors map[string]string) *SeldonJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -177,7 +177,7 @@ func (b *SeldonJobBuilder) NodeSelectors(selectors map[string]string) *SeldonJob // Annotations is used to add annotations for job pods,match option --annotation func (b *SeldonJobBuilder) Annotations(annotations map[string]string) *SeldonJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -189,7 +189,7 @@ func (b *SeldonJobBuilder) Annotations(annotations map[string]string) *SeldonJob // Labels is used to add labels for job func (b *SeldonJobBuilder) Labels(labels map[string]string) *SeldonJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -201,7 +201,7 @@ func (b *SeldonJobBuilder) Labels(labels map[string]string) *SeldonJobBuilder { // Datas is used to mount k8s pvc to job pods,match option --data func (b *SeldonJobBuilder) Datas(volumes map[string]string) *SeldonJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, 
value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -213,7 +213,7 @@ func (b *SeldonJobBuilder) Datas(volumes map[string]string) *SeldonJobBuilder { // DataDirs is used to mount host files to job containers,match option --data-dir func (b *SeldonJobBuilder) DataDirs(volumes map[string]string) *SeldonJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -241,7 +241,7 @@ func (b *SeldonJobBuilder) ModelUri(modelUri string) *SeldonJobBuilder { // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *SeldonJobBuilder) ConfigFiles(files map[string]string) *SeldonJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) diff --git a/pkg/apis/serving/tensorflow_builder.go b/pkg/apis/serving/tensorflow_builder.go index 65ce4aefd..8cb92b3b7 100644 --- a/pkg/apis/serving/tensorflow_builder.go +++ b/pkg/apis/serving/tensorflow_builder.go @@ -121,7 +121,7 @@ func (b *TFServingJobBuilder) Memory(memory string) *TFServingJobBuilder { // Envs is used to set env of job containers,match option --env func (b *TFServingJobBuilder) Envs(envs map[string]string) *TFServingJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -183,7 +183,7 @@ func (b *TFServingJobBuilder) Tolerations(tolerations []string) *TFServingJobBui // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *TFServingJobBuilder) NodeSelectors(selectors map[string]string) *TFServingJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -195,7 +195,7 @@ func (b *TFServingJobBuilder) NodeSelectors(selectors map[string]string) *TFServ // Annotations is used to add annotations for job pods,match option --annotation func (b *TFServingJobBuilder) Annotations(annotations map[string]string) *TFServingJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -207,7 +207,7 @@ func (b *TFServingJobBuilder) Annotations(annotations map[string]string) *TFServ // Labels is used to add labels for job func (b *TFServingJobBuilder) Labels(labels map[string]string) *TFServingJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -219,7 +219,7 @@ func (b *TFServingJobBuilder) Labels(labels map[string]string) *TFServingJobBuil // Datas is used to mount k8s pvc to job pods,match option --data func (b *TFServingJobBuilder) Datas(volumes map[string]string) *TFServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -231,7 +231,7 @@ func (b *TFServingJobBuilder) Datas(volumes map[string]string) *TFServingJobBuil // DataSubPathExprs is used to mount k8s pvc subpath to 
job pods,match option data-subpath-expr func (b *TFServingJobBuilder) DataSubPathExprs(exprs map[string]string) *TFServingJobBuilder { - if exprs != nil && len(exprs) != 0 { + if len(exprs) != 0 { s := []string{} for key, value := range exprs { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -243,7 +243,7 @@ func (b *TFServingJobBuilder) DataSubPathExprs(exprs map[string]string) *TFServi // TempDirs specify the deployment empty dir func (b *TFServingJobBuilder) TempDirs(volumes map[string]string) *TFServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -255,7 +255,7 @@ func (b *TFServingJobBuilder) TempDirs(volumes map[string]string) *TFServingJobB // EmptyDirSubPathExprs specify the datasource subpath to mount to the pod by expression func (b *TFServingJobBuilder) EmptyDirSubPathExprs(exprs map[string]string) *TFServingJobBuilder { - if exprs != nil && len(exprs) != 0 { + if len(exprs) != 0 { s := []string{} for key, value := range exprs { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -267,7 +267,7 @@ func (b *TFServingJobBuilder) EmptyDirSubPathExprs(exprs map[string]string) *TFS // DataDirs is used to mount host files to job containers,match option --data-dir func (b *TFServingJobBuilder) DataDirs(volumes map[string]string) *TFServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -319,7 +319,7 @@ func (b *TFServingJobBuilder) ModelPath(path string) *TFServingJobBuilder { // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *TFServingJobBuilder) ConfigFiles(files map[string]string) *TFServingJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) diff --git a/pkg/apis/serving/tensorrt_builder.go b/pkg/apis/serving/tensorrt_builder.go index 02a5077e3..7aeca13b1 100644 --- a/pkg/apis/serving/tensorrt_builder.go +++ b/pkg/apis/serving/tensorrt_builder.go @@ -122,7 +122,7 @@ func (b *TRTServingJobBuilder) Memory(memory string) *TRTServingJobBuilder { // Envs is used to set env of job containers,match option --env func (b *TRTServingJobBuilder) Envs(envs map[string]string) *TRTServingJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -168,7 +168,7 @@ func (b *TRTServingJobBuilder) Tolerations(tolerations []string) *TRTServingJobB // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *TRTServingJobBuilder) NodeSelectors(selectors map[string]string) *TRTServingJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -180,7 +180,7 @@ func (b *TRTServingJobBuilder) NodeSelectors(selectors map[string]string) *TRTSe // Annotations is used to add annotations for job pods,match option --annotation func (b *TRTServingJobBuilder) Annotations(annotations map[string]string) *TRTServingJobBuilder { - if annotations != nil && len(annotations) != 0 { + if 
len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -192,7 +192,7 @@ func (b *TRTServingJobBuilder) Annotations(annotations map[string]string) *TRTSe // Labels is used to add labels for job func (b *TRTServingJobBuilder) Labels(labels map[string]string) *TRTServingJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -204,7 +204,7 @@ func (b *TRTServingJobBuilder) Labels(labels map[string]string) *TRTServingJobBu // Datas is used to mount k8s pvc to job pods,match option --data func (b *TRTServingJobBuilder) Datas(volumes map[string]string) *TRTServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -216,7 +216,7 @@ func (b *TRTServingJobBuilder) Datas(volumes map[string]string) *TRTServingJobBu // DataDirs is used to mount host files to job containers,match option --data-dir func (b *TRTServingJobBuilder) DataDirs(volumes map[string]string) *TRTServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -266,7 +266,7 @@ func (b *TRTServingJobBuilder) AllowMetrics() *TRTServingJobBuilder { // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *TRTServingJobBuilder) ConfigFiles(files map[string]string) *TRTServingJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) diff --git a/pkg/apis/serving/traffic_router_builder.go b/pkg/apis/serving/traffic_router_builder.go index 2211e6bec..16e4e54de 100644 --- a/pkg/apis/serving/traffic_router_builder.go +++ b/pkg/apis/serving/traffic_router_builder.go @@ -42,7 +42,7 @@ func (b *TrafficRouterBuilder) Namespace(namespace string) *TrafficRouterBuilder // VersionWeight is used to set version weight func (b *TrafficRouterBuilder) VersionWeight(weights []types.ServingVersionWeight) *TrafficRouterBuilder { - if weights != nil && len(weights) != 0 { + if len(weights) != 0 { versionWeithts := []string{} for _, v := range weights { versionWeithts = append(versionWeithts, fmt.Sprintf("%v:%v", v.Version, v.Weight)) diff --git a/pkg/apis/serving/triton_builder.go b/pkg/apis/serving/triton_builder.go index 7f0bece57..afeb9d555 100644 --- a/pkg/apis/serving/triton_builder.go +++ b/pkg/apis/serving/triton_builder.go @@ -122,7 +122,7 @@ func (b *TritonServingJobBuilder) Memory(memory string) *TritonServingJobBuilder // Envs is used to set env of job containers,match option --env func (b *TritonServingJobBuilder) Envs(envs map[string]string) *TritonServingJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -168,7 +168,7 @@ func (b *TritonServingJobBuilder) Tolerations(tolerations []string) *TritonServi // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *TritonServingJobBuilder) NodeSelectors(selectors map[string]string) *TritonServingJobBuilder { - if selectors != nil && len(selectors) != 0 { + if 
len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -180,7 +180,7 @@ func (b *TritonServingJobBuilder) NodeSelectors(selectors map[string]string) *Tr // Annotations is used to add annotations for job pods,match option --annotation func (b *TritonServingJobBuilder) Annotations(annotations map[string]string) *TritonServingJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -192,7 +192,7 @@ func (b *TritonServingJobBuilder) Annotations(annotations map[string]string) *Tr // Labels is used to add labels for job func (b *TritonServingJobBuilder) Labels(labels map[string]string) *TritonServingJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -204,7 +204,7 @@ func (b *TritonServingJobBuilder) Labels(labels map[string]string) *TritonServin // Datas is used to mount k8s pvc to job pods,match option --data func (b *TritonServingJobBuilder) Datas(volumes map[string]string) *TritonServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -216,7 +216,7 @@ func (b *TritonServingJobBuilder) Datas(volumes map[string]string) *TritonServin // DataSubPathExprs is used to mount k8s pvc subpath to job pods,match option data-subpath-expr func (b *TritonServingJobBuilder) DataSubPathExprs(exprs map[string]string) *TritonServingJobBuilder { - if exprs != nil && len(exprs) != 0 { + if len(exprs) != 0 { s := []string{} for key, value := range exprs { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -228,7 +228,7 @@ func (b *TritonServingJobBuilder) DataSubPathExprs(exprs map[string]string) *Tri // TempDirs specify the deployment empty dir func (b *TritonServingJobBuilder) TempDirs(volumes map[string]string) *TritonServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -240,7 +240,7 @@ func (b *TritonServingJobBuilder) TempDirs(volumes map[string]string) *TritonSer // EmptyDirSubPathExprs specify the datasource subpath to mount to the pod by expression func (b *TritonServingJobBuilder) EmptyDirSubPathExprs(exprs map[string]string) *TritonServingJobBuilder { - if exprs != nil && len(exprs) != 0 { + if len(exprs) != 0 { s := []string{} for key, value := range exprs { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -252,7 +252,7 @@ func (b *TritonServingJobBuilder) EmptyDirSubPathExprs(exprs map[string]string) // DataDirs is used to mount host files to job containers,match option --data-dir func (b *TritonServingJobBuilder) DataDirs(volumes map[string]string) *TritonServingJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -302,7 +302,7 @@ func (b *TritonServingJobBuilder) AllowMetrics() *TritonServingJobBuilder { // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *TritonServingJobBuilder) ConfigFiles(files map[string]string) *TritonServingJobBuilder { - if files != nil && len(files) != 0 
{ + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) diff --git a/pkg/apis/serving/update_custom_builder.go b/pkg/apis/serving/update_custom_builder.go index b90e1f13f..16701d334 100644 --- a/pkg/apis/serving/update_custom_builder.go +++ b/pkg/apis/serving/update_custom_builder.go @@ -59,7 +59,7 @@ func (b *UpdateCustomServingJobBuilder) Image(image string) *UpdateCustomServing // Envs is used to set env of job containers,match option --env func (b *UpdateCustomServingJobBuilder) Envs(envs map[string]string) *UpdateCustomServingJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -71,7 +71,7 @@ func (b *UpdateCustomServingJobBuilder) Envs(envs map[string]string) *UpdateCust // Annotations is used to add annotations for job pods,match option --annotation func (b *UpdateCustomServingJobBuilder) Annotations(annotations map[string]string) *UpdateCustomServingJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -83,7 +83,7 @@ func (b *UpdateCustomServingJobBuilder) Annotations(annotations map[string]strin // Labels is used to add labels for job func (b *UpdateCustomServingJobBuilder) Labels(labels map[string]string) *UpdateCustomServingJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) diff --git a/pkg/apis/serving/update_kserve_builder.go b/pkg/apis/serving/update_kserve_builder.go index fc5a92606..ef09fb80b 100644 --- a/pkg/apis/serving/update_kserve_builder.go +++ b/pkg/apis/serving/update_kserve_builder.go @@ -59,7 +59,7 @@ func (b *UpdateKServeJobBuilder) Image(image string) *UpdateKServeJobBuilder { // Envs is used to set env of job containers,match option --env func (b *UpdateKServeJobBuilder) Envs(envs map[string]string) *UpdateKServeJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -71,7 +71,7 @@ func (b *UpdateKServeJobBuilder) Envs(envs map[string]string) *UpdateKServeJobBu // Annotations is used to add annotations for job pods,match option --annotation func (b *UpdateKServeJobBuilder) Annotations(annotations map[string]string) *UpdateKServeJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -83,7 +83,7 @@ func (b *UpdateKServeJobBuilder) Annotations(annotations map[string]string) *Upd // Labels is used to add labels for job func (b *UpdateKServeJobBuilder) Labels(labels map[string]string) *UpdateKServeJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) diff --git a/pkg/apis/serving/update_tensorflow_builder.go b/pkg/apis/serving/update_tensorflow_builder.go index 678683637..78849d2dc 100644 --- a/pkg/apis/serving/update_tensorflow_builder.go +++ b/pkg/apis/serving/update_tensorflow_builder.go @@ -69,7 +69,7 @@ func (b *UpdateTFServingJobBuilder) Image(image 
string) *UpdateTFServingJobBuild // Envs is used to set env of job containers,match option --env func (b *UpdateTFServingJobBuilder) Envs(envs map[string]string) *UpdateTFServingJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -81,7 +81,7 @@ func (b *UpdateTFServingJobBuilder) Envs(envs map[string]string) *UpdateTFServin // Annotations is used to add annotations for job pods,match option --annotation func (b *UpdateTFServingJobBuilder) Annotations(annotations map[string]string) *UpdateTFServingJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -93,7 +93,7 @@ func (b *UpdateTFServingJobBuilder) Annotations(annotations map[string]string) * // Labels is used to add labels for job func (b *UpdateTFServingJobBuilder) Labels(labels map[string]string) *UpdateTFServingJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) diff --git a/pkg/apis/serving/update_triton_builder.go b/pkg/apis/serving/update_triton_builder.go index d31247a2e..76846961d 100644 --- a/pkg/apis/serving/update_triton_builder.go +++ b/pkg/apis/serving/update_triton_builder.go @@ -69,7 +69,7 @@ func (b *UpdateTritonServingJobBuilder) Image(image string) *UpdateTritonServing // Envs is used to set env of job containers,match option --env func (b *UpdateTritonServingJobBuilder) Envs(envs map[string]string) *UpdateTritonServingJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -81,7 +81,7 @@ func (b *UpdateTritonServingJobBuilder) Envs(envs map[string]string) *UpdateTrit // Annotations is used to add annotations for job pods,match option --annotation func (b *UpdateTritonServingJobBuilder) Annotations(annotations map[string]string) *UpdateTritonServingJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -93,7 +93,7 @@ func (b *UpdateTritonServingJobBuilder) Annotations(annotations map[string]strin // Labels is used to add labels for job func (b *UpdateTritonServingJobBuilder) Labels(labels map[string]string) *UpdateTritonServingJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) diff --git a/pkg/apis/training/deepspeedjob_builder.go b/pkg/apis/training/deepspeedjob_builder.go index 3ce1e6dd6..33803b94b 100644 --- a/pkg/apis/training/deepspeedjob_builder.go +++ b/pkg/apis/training/deepspeedjob_builder.go @@ -73,7 +73,7 @@ func (b *DeepSpeedJobBuilder) WorkingDir(dir string) *DeepSpeedJobBuilder { // Envs is used to set env of job containers,match option --env func (b *DeepSpeedJobBuilder) Envs(envs map[string]string) *DeepSpeedJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -107,7 +107,7 @@ func (b *DeepSpeedJobBuilder) Tolerations(tolerations []string) *DeepSpeedJobBui // ConfigFiles is 
used to mapping config files form local to job containers,match option --config-file func (b *DeepSpeedJobBuilder) ConfigFiles(files map[string]string) *DeepSpeedJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) @@ -119,7 +119,7 @@ func (b *DeepSpeedJobBuilder) ConfigFiles(files map[string]string) *DeepSpeedJob // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *DeepSpeedJobBuilder) NodeSelectors(selectors map[string]string) *DeepSpeedJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -131,7 +131,7 @@ func (b *DeepSpeedJobBuilder) NodeSelectors(selectors map[string]string) *DeepSp // Annotations is used to add annotations for job pods,match option --annotation func (b *DeepSpeedJobBuilder) Annotations(annotations map[string]string) *DeepSpeedJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -143,7 +143,7 @@ func (b *DeepSpeedJobBuilder) Annotations(annotations map[string]string) *DeepSp // Labels is used to add labels for job func (b *DeepSpeedJobBuilder) Labels(labels map[string]string) *DeepSpeedJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -155,7 +155,7 @@ func (b *DeepSpeedJobBuilder) Labels(labels map[string]string) *DeepSpeedJobBuil // Datas is used to mount k8s pvc to job pods,match option --data func (b *DeepSpeedJobBuilder) Datas(volumes map[string]string) *DeepSpeedJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -167,7 +167,7 @@ func (b *DeepSpeedJobBuilder) Datas(volumes map[string]string) *DeepSpeedJobBuil // DataDirs is used to mount host files to job containers,match option --data-dir func (b *DeepSpeedJobBuilder) DataDirs(volumes map[string]string) *DeepSpeedJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/training/etjob_builder.go b/pkg/apis/training/etjob_builder.go index d2c71fc98..f988ac568 100644 --- a/pkg/apis/training/etjob_builder.go +++ b/pkg/apis/training/etjob_builder.go @@ -61,7 +61,7 @@ func (b *ETJobBuilder) WorkingDir(dir string) *ETJobBuilder { // Envs is used to set env of job containers,match option --env func (b *ETJobBuilder) Envs(envs map[string]string) *ETJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -95,7 +95,7 @@ func (b *ETJobBuilder) Tolerations(tolerations []string) *ETJobBuilder { // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *ETJobBuilder) ConfigFiles(files map[string]string) *ETJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, 
containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) @@ -107,7 +107,7 @@ func (b *ETJobBuilder) ConfigFiles(files map[string]string) *ETJobBuilder { // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *ETJobBuilder) NodeSelectors(selectors map[string]string) *ETJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -119,7 +119,7 @@ func (b *ETJobBuilder) NodeSelectors(selectors map[string]string) *ETJobBuilder // Annotations is used to add annotations for job pods,match option --annotation func (b *ETJobBuilder) Annotations(annotations map[string]string) *ETJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -131,7 +131,7 @@ func (b *ETJobBuilder) Annotations(annotations map[string]string) *ETJobBuilder // Labels is used to add labels for job func (b *ETJobBuilder) Labels(labels map[string]string) *ETJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -143,7 +143,7 @@ func (b *ETJobBuilder) Labels(labels map[string]string) *ETJobBuilder { // Datas is used to mount k8s pvc to job pods,match option --data func (b *ETJobBuilder) Datas(volumes map[string]string) *ETJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -155,7 +155,7 @@ func (b *ETJobBuilder) Datas(volumes map[string]string) *ETJobBuilder { // DataDirs is used to mount host files to job containers,match option --data-dir func (b *ETJobBuilder) DataDirs(volumes map[string]string) *ETJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/training/horovod_builder.go b/pkg/apis/training/horovod_builder.go index 95c8c0800..51f6515dd 100644 --- a/pkg/apis/training/horovod_builder.go +++ b/pkg/apis/training/horovod_builder.go @@ -59,7 +59,7 @@ func (b *HorovodJobBuilder) WorkingDir(dir string) *HorovodJobBuilder { // Envs is used to set env of job containers,match option --env func (b *HorovodJobBuilder) Envs(envs map[string]string) *HorovodJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -93,7 +93,7 @@ func (b *HorovodJobBuilder) Tolerations(tolerations []string) *HorovodJobBuilder // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *HorovodJobBuilder) ConfigFiles(files map[string]string) *HorovodJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) @@ -105,7 +105,7 @@ func (b *HorovodJobBuilder) ConfigFiles(files map[string]string) *HorovodJobBuil // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b 
*HorovodJobBuilder) NodeSelectors(selectors map[string]string) *HorovodJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -117,7 +117,7 @@ func (b *HorovodJobBuilder) NodeSelectors(selectors map[string]string) *HorovodJ // Annotations is used to add annotations for job pods,match option --annotation func (b *HorovodJobBuilder) Annotations(annotations map[string]string) *HorovodJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -129,7 +129,7 @@ func (b *HorovodJobBuilder) Annotations(annotations map[string]string) *HorovodJ // Datas is used to mount k8s pvc to job pods,match option --data func (b *HorovodJobBuilder) Datas(volumes map[string]string) *HorovodJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -141,7 +141,7 @@ func (b *HorovodJobBuilder) Datas(volumes map[string]string) *HorovodJobBuilder // DataDirs is used to mount host files to job containers,match option --data-dir func (b *HorovodJobBuilder) DataDirs(volumes map[string]string) *HorovodJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/training/mpijob_builder.go b/pkg/apis/training/mpijob_builder.go index d6be7f4e0..35c86c383 100644 --- a/pkg/apis/training/mpijob_builder.go +++ b/pkg/apis/training/mpijob_builder.go @@ -60,7 +60,7 @@ func (b *MPIJobBuilder) WorkingDir(dir string) *MPIJobBuilder { // Envs is used to set env of job containers,match option --env func (b *MPIJobBuilder) Envs(envs map[string]string) *MPIJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -94,7 +94,7 @@ func (b *MPIJobBuilder) Tolerations(tolerations []string) *MPIJobBuilder { // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *MPIJobBuilder) ConfigFiles(files map[string]string) *MPIJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) @@ -106,7 +106,7 @@ func (b *MPIJobBuilder) ConfigFiles(files map[string]string) *MPIJobBuilder { // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *MPIJobBuilder) NodeSelectors(selectors map[string]string) *MPIJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -118,7 +118,7 @@ func (b *MPIJobBuilder) NodeSelectors(selectors map[string]string) *MPIJobBuilde // Annotations is used to add annotations for job pods,match option --annotation func (b *MPIJobBuilder) Annotations(annotations map[string]string) *MPIJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range 
annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -130,7 +130,7 @@ func (b *MPIJobBuilder) Annotations(annotations map[string]string) *MPIJobBuilde // Labels is used to add labels for job func (b *MPIJobBuilder) Labels(labels map[string]string) *MPIJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -142,7 +142,7 @@ func (b *MPIJobBuilder) Labels(labels map[string]string) *MPIJobBuilder { // Datas is used to mount k8s pvc to job pods,match option --data func (b *MPIJobBuilder) Datas(volumes map[string]string) *MPIJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -154,7 +154,7 @@ func (b *MPIJobBuilder) Datas(volumes map[string]string) *MPIJobBuilder { // DataDirs is used to mount host files to job containers,match option --data-dir func (b *MPIJobBuilder) DataDirs(volumes map[string]string) *MPIJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/training/pytorchjob_builder.go b/pkg/apis/training/pytorchjob_builder.go index a17dafbf9..83cd5519f 100644 --- a/pkg/apis/training/pytorchjob_builder.go +++ b/pkg/apis/training/pytorchjob_builder.go @@ -60,7 +60,7 @@ func (b *PytorchJobBuilder) WorkingDir(dir string) *PytorchJobBuilder { // Envs is used to set env of job containers,match option --env func (b *PytorchJobBuilder) Envs(envs map[string]string) *PytorchJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -94,7 +94,7 @@ func (b *PytorchJobBuilder) Tolerations(tolerations []string) *PytorchJobBuilder // ConfigFiles is used to mapping config files form local to job containers,match option --config-file func (b *PytorchJobBuilder) ConfigFiles(files map[string]string) *PytorchJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) @@ -106,7 +106,7 @@ func (b *PytorchJobBuilder) ConfigFiles(files map[string]string) *PytorchJobBuil // NodeSelectors is used to set node selectors for scheduling job,match option --selector func (b *PytorchJobBuilder) NodeSelectors(selectors map[string]string) *PytorchJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -118,7 +118,7 @@ func (b *PytorchJobBuilder) NodeSelectors(selectors map[string]string) *PytorchJ // Annotations is used to add annotations for job pods,match option --annotation func (b *PytorchJobBuilder) Annotations(annotations map[string]string) *PytorchJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -130,7 +130,7 @@ func (b *PytorchJobBuilder) Annotations(annotations map[string]string) *PytorchJ // Labels is used to add labels for job func (b *PytorchJobBuilder) Labels(labels map[string]string) 
*PytorchJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -142,7 +142,7 @@ func (b *PytorchJobBuilder) Labels(labels map[string]string) *PytorchJobBuilder // Datas is used to mount k8s pvc to job pods,match option --data func (b *PytorchJobBuilder) Datas(volumes map[string]string) *PytorchJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -154,7 +154,7 @@ func (b *PytorchJobBuilder) Datas(volumes map[string]string) *PytorchJobBuilder // DataDirs is used to mount host files to job containers,match option --data-dir func (b *PytorchJobBuilder) DataDirs(volumes map[string]string) *PytorchJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) diff --git a/pkg/apis/training/sparkjob_builder.go b/pkg/apis/training/sparkjob_builder.go index f6f31b5fd..106d1b75c 100644 --- a/pkg/apis/training/sparkjob_builder.go +++ b/pkg/apis/training/sparkjob_builder.go @@ -100,7 +100,7 @@ func (b *SparkJobBuilder) ExecutorMemoryRequest(memory string) *SparkJobBuilder } func (b *SparkJobBuilder) Labels(labels map[string]string) *SparkJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -111,7 +111,7 @@ func (b *SparkJobBuilder) Labels(labels map[string]string) *SparkJobBuilder { } func (b *SparkJobBuilder) Annotations(annotations map[string]string) *SparkJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) diff --git a/pkg/apis/training/tfjob_builder.go b/pkg/apis/training/tfjob_builder.go index f0d668075..e1724534e 100644 --- a/pkg/apis/training/tfjob_builder.go +++ b/pkg/apis/training/tfjob_builder.go @@ -62,7 +62,7 @@ func (b *TFJobBuilder) WorkingDir(dir string) *TFJobBuilder { } func (b *TFJobBuilder) Envs(envs map[string]string) *TFJobBuilder { - if envs != nil && len(envs) != 0 { + if len(envs) != 0 { envSlice := []string{} for key, value := range envs { envSlice = append(envSlice, fmt.Sprintf("%v=%v", key, value)) @@ -92,7 +92,7 @@ func (b *TFJobBuilder) Tolerations(tolerations []string) *TFJobBuilder { } func (b *TFJobBuilder) ConfigFiles(files map[string]string) *TFJobBuilder { - if files != nil && len(files) != 0 { + if len(files) != 0 { filesSlice := []string{} for localPath, containerPath := range files { filesSlice = append(filesSlice, fmt.Sprintf("%v:%v", localPath, containerPath)) @@ -103,7 +103,7 @@ func (b *TFJobBuilder) ConfigFiles(files map[string]string) *TFJobBuilder { } func (b *TFJobBuilder) NodeSelectors(selectors map[string]string) *TFJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { selectorsSlice := []string{} for key, value := range selectors { selectorsSlice = append(selectorsSlice, fmt.Sprintf("%v=%v", key, value)) @@ -114,7 +114,7 @@ func (b *TFJobBuilder) NodeSelectors(selectors map[string]string) *TFJobBuilder } func (b *TFJobBuilder) Annotations(annotations map[string]string) *TFJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := []string{} for key, value := 
range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -125,7 +125,7 @@ func (b *TFJobBuilder) Annotations(annotations map[string]string) *TFJobBuilder } func (b *TFJobBuilder) Labels(labels map[string]string) *TFJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -176,7 +176,7 @@ func (b *TFJobBuilder) ChiefPort(port int) *TFJobBuilder { } func (b *TFJobBuilder) ChiefSelectors(selectors map[string]string) *TFJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { s := []string{} for key, value := range selectors { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -187,7 +187,7 @@ func (b *TFJobBuilder) ChiefSelectors(selectors map[string]string) *TFJobBuilder } func (b *TFJobBuilder) Datas(volumes map[string]string) *TFJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -198,7 +198,7 @@ func (b *TFJobBuilder) Datas(volumes map[string]string) *TFJobBuilder { } func (b *TFJobBuilder) DataDirs(volumes map[string]string) *TFJobBuilder { - if volumes != nil && len(volumes) != 0 { + if len(volumes) != 0 { s := []string{} for key, value := range volumes { s = append(s, fmt.Sprintf("%v:%v", key, value)) @@ -242,7 +242,7 @@ func (b *TFJobBuilder) EvaluatorMemoryLimit(mem string) *TFJobBuilder { } func (b *TFJobBuilder) EvaluatorSelectors(selectors map[string]string) *TFJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { s := []string{} for key, value := range selectors { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -323,7 +323,7 @@ func (b *TFJobBuilder) PsPort(port int) *TFJobBuilder { } func (b *TFJobBuilder) PsSelectors(selectors map[string]string) *TFJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { s := []string{} for key, value := range selectors { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -414,7 +414,7 @@ func (b *TFJobBuilder) WorkerPort(port int) *TFJobBuilder { } func (b *TFJobBuilder) WorkerSelectors(selectors map[string]string) *TFJobBuilder { - if selectors != nil && len(selectors) != 0 { + if len(selectors) != 0 { s := []string{} for key, value := range selectors { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -445,7 +445,7 @@ func (b *TFJobBuilder) CleanPodPolicy(policy string) *TFJobBuilder { } func (b *TFJobBuilder) RoleSequence(roles []string) *TFJobBuilder { - if roles != nil && len(roles) != 0 { + if len(roles) != 0 { b.argValues["role-sequence"] = strings.Join(roles, ",") } return b diff --git a/pkg/apis/training/volcano_builder.go b/pkg/apis/training/volcano_builder.go index 904a539b5..c7d532927 100644 --- a/pkg/apis/training/volcano_builder.go +++ b/pkg/apis/training/volcano_builder.go @@ -111,7 +111,7 @@ func (b *VolcanoJobBuilder) TaskPort(port int) *VolcanoJobBuilder { } func (b *VolcanoJobBuilder) Labels(labels map[string]string) *VolcanoJobBuilder { - if labels != nil && len(labels) != 0 { + if len(labels) != 0 { s := []string{} for key, value := range labels { s = append(s, fmt.Sprintf("%v=%v", key, value)) @@ -122,7 +122,7 @@ func (b *VolcanoJobBuilder) Labels(labels map[string]string) *VolcanoJobBuilder } func (b *VolcanoJobBuilder) Annotations(annotations map[string]string) *VolcanoJobBuilder { - if annotations != nil && len(annotations) != 0 { + if len(annotations) != 0 { s := 
[]string{} for key, value := range annotations { s = append(s, fmt.Sprintf("%v=%v", key, value)) diff --git a/pkg/apis/types/gpunode.go b/pkg/apis/types/gpunode.go index f36b92a87..3999061f6 100644 --- a/pkg/apis/types/gpunode.go +++ b/pkg/apis/types/gpunode.go @@ -132,7 +132,7 @@ type GPUTopologyNodeInfo struct { GPUTopology GPUTopology `json:"gpuTopology" yaml:"gpuTopology"` CommonGPUNodeInfo `yaml:",inline" json:",inline"` CommonNodeInfo `yaml:",inline" json:",inline"` - Devices []GPUTopologyNodeDevice `yaml:"devices" yaml:"devices"` + Devices []GPUTopologyNodeDevice `json:"devices" yaml:"devices"` } type GPUTopology struct { diff --git a/pkg/apis/utils/pods.go b/pkg/apis/utils/pods.go index f4968ddff..d76013c66 100644 --- a/pkg/apis/utils/pods.go +++ b/pkg/apis/utils/pods.go @@ -2,7 +2,6 @@ package utils import ( "context" - "errors" "fmt" "sort" "strings" @@ -298,12 +297,8 @@ func GetRunningTimeOfPod(pod *v1.Pod) time.Duration { var endTime *metav1.Time // get pod start time allContainerStatuses := []v1.ContainerStatus{} - for _, s := range pod.Status.InitContainerStatuses { - allContainerStatuses = append(allContainerStatuses, s) - } - for _, s := range pod.Status.ContainerStatuses { - allContainerStatuses = append(allContainerStatuses, s) - } + allContainerStatuses = append(allContainerStatuses, pod.Status.InitContainerStatuses...) + allContainerStatuses = append(allContainerStatuses, pod.Status.ContainerStatuses...) startTime, endTime = getStartTimeAndEndTime(allContainerStatuses) if startTime == nil && pod.Status.StartTime != nil { startTime = pod.Status.StartTime @@ -352,11 +347,3 @@ func GetDurationOfPod(pod *v1.Pod) time.Duration { } return GetRunningTimeOfPod(pod) } - -func parseInt(i interface{}) (int, error) { - s, ok := i.(int64) - if !ok { - return 0, errors.New("invalid value") - } - return int(s), nil -} diff --git a/pkg/apis/utils/utils.go b/pkg/apis/utils/utils.go index 94f1d1f52..2a1b1ca72 100644 --- a/pkg/apis/utils/utils.go +++ b/pkg/apis/utils/utils.go @@ -35,7 +35,7 @@ const ( // GetTrainingJobTypes returns the supported training job types func GetTrainingJobTypes() []types.TrainingJobType { trainingTypes := []types.TrainingJobType{} - for trainingType, _ := range types.TrainingTypeMap { + for trainingType := range types.TrainingTypeMap { trainingTypes = append(trainingTypes, trainingType) } return trainingTypes @@ -56,13 +56,13 @@ func TransferTrainingJobType(jobType string) types.TrainingJobType { return types.AllTrainingJob } for trainingType, typeInfo := range types.TrainingTypeMap { - if strings.ToLower(string(typeInfo.Name)) == strings.ToLower(jobType) { + if strings.EqualFold(string(typeInfo.Name), jobType) { return trainingType } - if strings.ToLower(typeInfo.Alias) == strings.ToLower(jobType) { + if strings.EqualFold(typeInfo.Alias, jobType) { return trainingType } - if strings.ToLower(typeInfo.Shorthand) == strings.ToLower(jobType) { + if strings.EqualFold(typeInfo.Shorthand, jobType) { return trainingType } } @@ -82,7 +82,7 @@ func TransferNodeType(nodeType string) types.NodeType { return types.AllKnownNode } for _, typeInfo := range types.NodeTypeSlice { - if strings.ToLower(typeInfo.Alias) == strings.ToLower(nodeType) { + if strings.EqualFold(typeInfo.Alias, nodeType) { return typeInfo.Name } if string(typeInfo.Name) == nodeType { @@ -117,13 +117,13 @@ func TransferServingJobType(jobType string) types.ServingJobType { return types.AllServingJob } for servingType, typeInfo := range types.ServingTypeMap { - if strings.ToLower(string(typeInfo.Name)) == 
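Besides the corrected struct tag in gpunode.go (the first of the duplicated `yaml:"devices"` tags was meant to be `json:"devices"`), the pods.go and utils.go hunks above show two more mechanical rewrites: copying a slice with a single variadic `append` instead of an element-by-element loop, and ranging over only the keys of a map when the values are unused. A small illustrative sketch with hypothetical data, not taken from the repository:

package main

import "fmt"

func main() {
	// Simplified stand-ins for pod.Status.InitContainerStatuses / ContainerStatuses.
	initStatuses := []string{"init-a"}
	mainStatuses := []string{"main-a", "main-b"}

	// append(dst, src...) replaces the element-by-element copy loop.
	all := []string{}
	all = append(all, initStatuses...)
	all = append(all, mainStatuses...)

	// "for k := range m" replaces "for k, _ := range m" when the value is unused.
	typeMap := map[string]bool{"tfjob": true, "mpijob": true}
	names := []string{}
	for name := range typeMap {
		names = append(names, name)
	}
	fmt.Println(all, names)
}
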
strings.ToLower(jobType) { + if strings.EqualFold(string(typeInfo.Name), jobType) { return servingType } - if strings.ToLower(typeInfo.Alias) == strings.ToLower(jobType) { + if strings.EqualFold(typeInfo.Alias, jobType) { return servingType } - if strings.ToLower(typeInfo.Shorthand) == strings.ToLower(jobType) { + if strings.EqualFold(typeInfo.Shorthand, jobType) { return servingType } } @@ -167,13 +167,13 @@ func TransferPrintFormat(format string) types.FormatStyle { func PrintErrorMessage(message string) { if strings.Contains(message, "please use '--type' or '--version' to filter.") { w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - fmt.Fprintf(w, message) + fmt.Fprint(w, message) w.Flush() return } if strings.Contains(message, "please use '-i' or '--instance' to filter.") { w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - fmt.Fprintf(w, message) + fmt.Fprint(w, message) w.Flush() return } @@ -265,13 +265,13 @@ func TransferModelJobType(jobType string) types.ModelJobType { return types.AllModelJob } for modelJobType, typeInfo := range types.ModelTypeMap { - if strings.ToLower(string(typeInfo.Name)) == strings.ToLower(jobType) { + if strings.EqualFold(string(typeInfo.Name), jobType) { return modelJobType } - if strings.ToLower(typeInfo.Alias) == strings.ToLower(jobType) { + if strings.EqualFold(typeInfo.Alias, jobType) { return modelJobType } - if strings.ToLower(typeInfo.Shorthand) == strings.ToLower(jobType) { + if strings.EqualFold(typeInfo.Shorthand, jobType) { return modelJobType } } diff --git a/pkg/argsbuilder/scale_etjob.go b/pkg/argsbuilder/scale_etjob.go index 0586ac233..da7cbd6ce 100644 --- a/pkg/argsbuilder/scale_etjob.go +++ b/pkg/argsbuilder/scale_etjob.go @@ -71,7 +71,7 @@ func (s *ScaleETJobArgsBuilder) AddCommandFlags(command *cobra.Command) { script = s.(string) } command.Flags().StringVar(&s.args.Name, "name", "", "required, et job name") - command.MarkFlagRequired("name") + _ = command.MarkFlagRequired("name") command.Flags().DurationVarP(&scaleDuration, "timeout", "t", 60*time.Second, "timeout of callback scaler script, like 5s, 2m, or 3h.") command.Flags().IntVar(&s.args.Retry, "retry", 0, "retry times.") command.Flags().IntVar(&s.args.Count, "count", 1, "the nums of you want to add or delete worker.") diff --git a/pkg/argsbuilder/serving.go b/pkg/argsbuilder/serving.go index 528281ef9..faf4a5cb6 100644 --- a/pkg/argsbuilder/serving.go +++ b/pkg/argsbuilder/serving.go @@ -95,7 +95,7 @@ func (s *ServingArgsBuilder) AddCommandFlags(command *cobra.Command) { } command.Flags().StringVar(&s.args.Image, "image", defaultImage, "the docker image name of serving job") command.Flags().StringVar(&s.args.ImagePullPolicy, "imagePullPolicy", "IfNotPresent", "the policy to pull the image, and the default policy is IfNotPresent") - command.Flags().MarkDeprecated("imagePullPolicy", "please use --image-pull-policy instead") + _ = command.Flags().MarkDeprecated("imagePullPolicy", "please use --image-pull-policy instead") command.Flags().StringVar(&s.args.ImagePullPolicy, "image-pull-policy", "IfNotPresent", "the policy to pull the image, and the default policy is IfNotPresent") command.Flags().IntVar(&s.args.GPUCount, "gpus", 0, "the limit GPU count of each replica to run the serve.") @@ -111,19 +111,19 @@ func (s *ServingArgsBuilder) AddCommandFlags(command *cobra.Command) { command.Flags().StringArrayVarP(&envs, "env", "e", []string{}, "the environment variables") command.Flags().BoolVar(&s.args.EnableIstio, "enableIstio", false, "enable Istio for serving or not (disable 
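The utils.go comparisons are switched to `strings.EqualFold`, which performs a case-insensitive (Unicode case-folded) match without allocating two lowered copies, and `fmt.Fprintf(w, message)` becomes `fmt.Fprint(w, message)` so a message that happens to contain `%` is never misread as a format string. A short standalone example of both standard-library calls:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Case-insensitive comparison without building lowercase copies first.
	fmt.Println(strings.EqualFold("TFJob", "tfjob")) // true

	// Fprint writes the message verbatim; Fprintf would try to interpret
	// any "%" verbs that happen to appear in a user-facing string.
	msg := "multiple instances found, 100% of them listed below\n"
	fmt.Fprint(os.Stdout, msg)
}
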
Istio by default)") - command.Flags().MarkDeprecated("enableIstio", "please use --enable-istio instead") + _ = command.Flags().MarkDeprecated("enableIstio", "please use --enable-istio instead") command.Flags().BoolVar(&s.args.EnableIstio, "enable-istio", false, "enable Istio for serving or not (disable Istio by default)") command.Flags().BoolVar(&s.args.ExposeService, "exposeService", false, "expose service using Istio gateway for external access or not (not expose by default)") - command.Flags().MarkDeprecated("exposeService", "please use --expose-service instead") + _ = command.Flags().MarkDeprecated("exposeService", "please use --expose-service instead") command.Flags().BoolVar(&s.args.ExposeService, "expose-service", false, "expose service using Istio gateway for external access or not (not expose by default)") command.Flags().StringVar(&s.args.Name, "servingName", "", "the serving name") - command.Flags().MarkDeprecated("servingName", "please use --name instead") + _ = command.Flags().MarkDeprecated("servingName", "please use --name instead") command.Flags().StringVar(&s.args.Name, "name", "", "the serving name") command.Flags().StringVar(&s.args.Version, "servingVersion", "", "the serving version") - command.Flags().MarkDeprecated("servingVersion", "please use --version instead") + _ = command.Flags().MarkDeprecated("servingVersion", "please use --version instead") command.Flags().StringVar(&s.args.Version, "version", "", "the serving version") command.Flags().StringArrayVarP(&dataset, "data", "d", []string{}, "specify the trained models datasource to mount for serving, like :") @@ -131,7 +131,7 @@ func (s *ServingArgsBuilder) AddCommandFlags(command *cobra.Command) { command.Flags().StringArrayVarP(&datadir, "data-dir", "", []string{}, "specify the trained models datasource on host to mount for serving, like :") command.Flags().StringArrayVarP(&tempDirSubpathExpr, "temp-dir-subpath-expr", "", []string{}, "specify the datasource subpath to mount to the pod by expression, like :") command.Flags().StringArrayVarP(&tempDir, "temp-dir", "", []string{}, "specify the deployment empty dir, like :") - command.MarkFlagRequired("name") + _ = command.MarkFlagRequired("name") command.Flags().StringArrayVarP(&annotations, "annotation", "a", []string{}, `specify the annotations, usage: "--annotation=key=value" or "--annotation key=value"`) command.Flags().StringArrayVarP(&labels, "label", "l", []string{}, "specify the labels") @@ -587,8 +587,7 @@ func (s *ServingArgsBuilder) validateIstioEnablement() error { if !s.args.EnableIstio { return nil } - var reg *regexp.Regexp - reg = regexp.MustCompile(regexp4serviceName) + reg := regexp.MustCompile(regexp4serviceName) matched := reg.MatchString(s.args.Name) if !matched { return fmt.Errorf("--name should be numbers, letters, dashes, and underscores ONLY") diff --git a/pkg/argsbuilder/serving_kserve.go b/pkg/argsbuilder/serving_kserve.go index 9c08247cd..704d2f968 100644 --- a/pkg/argsbuilder/serving_kserve.go +++ b/pkg/argsbuilder/serving_kserve.go @@ -24,8 +24,7 @@ import ( ) const ( - KServeModelFormat = "modelFormat" - KServeCanaryTrafficPercent = "canaryTrafficPercent" + KServeModelFormat = "modelFormat" ) type KServeArgsBuilder struct { @@ -160,16 +159,3 @@ func (s *KServeArgsBuilder) setModelFormat() error { } return nil } - -func (s *KServeArgsBuilder) setCanaryTrafficPercent() error { - item, ok := s.argValues[KServeCanaryTrafficPercent] - if !ok { - return nil - } - ctp := item.(*int64) - - if *ctp >= 0 && *ctp <= 100 { - 
s.args.CanaryTrafficPercent = *ctp - } - return nil -} diff --git a/pkg/argsbuilder/serving_tensorflow.go b/pkg/argsbuilder/serving_tensorflow.go index 74d1e29ce..a8a9331e3 100644 --- a/pkg/argsbuilder/serving_tensorflow.go +++ b/pkg/argsbuilder/serving_tensorflow.go @@ -14,18 +14,15 @@ package argsbuilder import ( - "context" "fmt" "reflect" "regexp" "strings" - "github.com/kubeflow/arena/pkg/apis/config" - "github.com/kubeflow/arena/pkg/apis/types" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubeflow/arena/pkg/apis/types" ) const ( @@ -79,26 +76,26 @@ func (s *TensorflowServingArgsBuilder) AddCommandFlags(command *cobra.Command) { } command.Flags().IntVar(&s.args.Port, "port", 8500, "the port of tensorflow gRPC listening port") command.Flags().IntVar(&s.args.RestfulPort, "restfulPort", 8501, "the port of tensorflow RESTful listening port") - command.Flags().MarkDeprecated("restfulPort", "please use --restful-port instead") + _ = command.Flags().MarkDeprecated("restfulPort", "please use --restful-port instead") command.Flags().IntVar(&s.args.RestfulPort, "restful-port", 8501, "the port of tensorflow RESTful listening port") command.Flags().StringVar(&s.args.ModelName, "modelName", "", "the model name for serving") - command.Flags().MarkDeprecated("modelName", "please use --model-name instead") + _ = command.Flags().MarkDeprecated("modelName", "please use --model-name instead") command.Flags().StringVar(&s.args.ModelName, "model-name", "", "the model name for serving, ignored if --model-config-file flag is set") command.Flags().StringVar(&s.args.ModelPath, "modelPath", "", "the model path for serving in the container") - command.Flags().MarkDeprecated("modelPath", "please use --model-path instead") + _ = command.Flags().MarkDeprecated("modelPath", "please use --model-path instead") command.Flags().StringVar(&s.args.ModelPath, "model-path", "", "the model path for serving in the container, ignored if --model-config-file flag is set, otherwise required") command.Flags().StringVar(&s.args.ModelConfigFile, "modelConfigFile", "", "corresponding with --model_config_file in tensorflow serving") - command.Flags().MarkDeprecated("modelConfigFile", "please use --model-config-file instead") + _ = command.Flags().MarkDeprecated("modelConfigFile", "please use --model-config-file instead") command.Flags().StringVar(&s.args.ModelConfigFile, "model-config-file", "", "corresponding with --model_config_file in tensorflow serving") command.Flags().StringVar(&s.args.MonitoringConfigFile, "monitoring-config-file", "", "corresponding with --monitoring_config_file in tensorflow serving") command.Flags().StringVar(&s.args.VersionPolicy, "versionPolicy", "", "support latest, latest:N, specific:N, all") - command.Flags().MarkDeprecated("versionPolicy", "please use --version-policy instead") + _ = command.Flags().MarkDeprecated("versionPolicy", "please use --version-policy instead") command.Flags().StringVar(&s.args.VersionPolicy, "version-policy", "", "support latest, latest:N, specific:N, all") - command.Flags().MarkDeprecated("version-policy", "please use --model-config-file instead") + _ = command.Flags().MarkDeprecated("version-policy", "please use --model-config-file instead") command.Flags().StringVar(&s.args.Command, "command", "", "the command will inject to container's command.") } @@ -131,8 +128,7 @@ func (s *TensorflowServingArgsBuilder) validateModelName() error { if s.args.ModelName == 
"" { return fmt.Errorf("model name cannot be blank") } - var reg *regexp.Regexp - reg = regexp.MustCompile(regexp4serviceName) + reg := regexp.MustCompile(regexp4serviceName) matched := reg.MatchString(s.args.ModelName) if !matched { return fmt.Errorf("model name should be numbers, letters, dashes, and underscores ONLY") @@ -170,20 +166,6 @@ func (s *TensorflowServingArgsBuilder) preprocess() (err error) { return nil } -func (s *TensorflowServingArgsBuilder) checkServiceExists() error { - client := config.GetArenaConfiger().GetClientSet() - _, err := client.CoreV1().Services(s.args.Namespace).Get(context.TODO(), s.args.Name, metav1.GetOptions{}) - if err != nil { - if !errors.IsNotFound(err) { - return err - } - s.args.ModelServiceExists = false - } else { - s.args.ModelServiceExists = true - } - return nil -} - func (s *TensorflowServingArgsBuilder) checkPortsIsOk() error { switch { case s.args.Port != 0: diff --git a/pkg/argsbuilder/submit.go b/pkg/argsbuilder/submit.go index 493bf2ff5..8236bdf18 100644 --- a/pkg/argsbuilder/submit.go +++ b/pkg/argsbuilder/submit.go @@ -82,7 +82,7 @@ func (s *SubmitArgsBuilder) AddCommandFlags(command *cobra.Command) { // add option --name command.Flags().StringVar(&s.args.Name, "name", "", "override name") // --name is required - command.MarkFlagRequired("name") + _ = command.MarkFlagRequired("name") // add option --image command.Flags().StringVar(&s.args.Image, "image", "", "the docker image name of training job") // command.MarkFlagRequired("image") @@ -98,7 +98,7 @@ func (s *SubmitArgsBuilder) AddCommandFlags(command *cobra.Command) { // command.MarkFlagRequired("syncSource") // add option --working-dir command.Flags().StringVar(&s.args.WorkingDir, "workingDir", "/root", "working directory to extract the code. If using syncMode, the $workingDir/code contains the code") - command.Flags().MarkDeprecated("workingDir", "please use --working-dir instead") + _ = command.Flags().MarkDeprecated("workingDir", "please use --working-dir instead") command.Flags().StringVar(&s.args.WorkingDir, "working-dir", "/root", "working directory to extract the code. If using syncMode, the $workingDir/code contains the code") // command.MarkFlagRequired("workingDir") @@ -108,7 +108,7 @@ func (s *SubmitArgsBuilder) AddCommandFlags(command *cobra.Command) { command.Flags().StringArrayVarP(&dataSet, "data", "d", []string{}, "specify the datasource to mount to the job, like :") // add option --data-dir,its' value will be get from viper command.Flags().StringArrayVar(&dataDir, "dataDir", []string{}, "the data dir. If you specify /data, it means mounting hostpath /data into container path /data") - command.Flags().MarkDeprecated("dataDir", "please use --data-dir instead") + _ = command.Flags().MarkDeprecated("dataDir", "please use --data-dir instead") command.Flags().StringArrayVar(&dataDir, "data-dir", []string{}, "the data dir. 
If you specify /data, it means mounting hostpath /data into container path /data") // add option --annotation,its' value will be get from viper command.Flags().StringArrayVarP(&annotations, "annotation", "a", []string{}, `the annotations, usage: "--annotation=key=value" or "--annotation key=value"`) @@ -239,11 +239,6 @@ func (s *SubmitArgsBuilder) UpdateArgs(args *types.CommonSubmitArgs) { s.args = args } -// getArgs returns the CommonSubmitArgs -func (s *SubmitArgsBuilder) getArgs() *types.CommonSubmitArgs { - return s.args -} - // checkNameAndPriorityClassName is used to check the name func (s *SubmitArgsBuilder) checkNameAndPriorityClassName() error { if s.args.Name == "" { diff --git a/pkg/argsbuilder/submit_deepspeedjob.go b/pkg/argsbuilder/submit_deepspeedjob.go index 0928798bb..c042f3589 100644 --- a/pkg/argsbuilder/submit_deepspeedjob.go +++ b/pkg/argsbuilder/submit_deepspeedjob.go @@ -191,8 +191,7 @@ func (s *SubmitDeepSpeedJobArgsBuilder) setLauncherAnnotations() error { if !ok { return nil } - var annotations *[]string - annotations = item.(*[]string) + annotations := item.(*[]string) if len(*annotations) == 0 { return nil } @@ -211,8 +210,7 @@ func (s *SubmitDeepSpeedJobArgsBuilder) setWorkerAnnotations() error { if !ok { return nil } - var annotations *[]string - annotations = item.(*[]string) + annotations := item.(*[]string) if len(*annotations) == 0 { return nil } diff --git a/pkg/argsbuilder/submit_etjob.go b/pkg/argsbuilder/submit_etjob.go index 288cf1586..57010e9cf 100644 --- a/pkg/argsbuilder/submit_etjob.go +++ b/pkg/argsbuilder/submit_etjob.go @@ -223,8 +223,7 @@ func (s *SubmitETJobArgsBuilder) setLauncherAnnotations() error { if !ok { return nil } - var annotations *[]string - annotations = item.(*[]string) + annotations := item.(*[]string) if len(*annotations) == 0 { return nil } @@ -243,8 +242,7 @@ func (s *SubmitETJobArgsBuilder) setWorkerAnnotations() error { if !ok { return nil } - var annotations *[]string - annotations = item.(*[]string) + annotations := item.(*[]string) if len(*annotations) == 0 { return nil } diff --git a/pkg/argsbuilder/submit_sparkjob.go b/pkg/argsbuilder/submit_sparkjob.go index 6f0ee87f7..6fe67bad1 100644 --- a/pkg/argsbuilder/submit_sparkjob.go +++ b/pkg/argsbuilder/submit_sparkjob.go @@ -69,7 +69,7 @@ func (s *SubmitSparkJobArgsBuilder) AddCommandFlags(command *cobra.Command) { labels []string ) command.Flags().StringVar(&s.args.Name, "name", "", "override name") - command.MarkFlagRequired("name") + _ = command.MarkFlagRequired("name") command.Flags().StringVar(&s.args.Image, "image", "registry.aliyuncs.com/acs/spark:v2.4.0", "the docker image name of training job") command.Flags().IntVar(&s.args.Executor.Replicas, "replicas", 1, "the executor's number to run the distributed training.") diff --git a/pkg/argsbuilder/submit_sync_code.go b/pkg/argsbuilder/submit_sync_code.go index 4661ab959..c5d5c6958 100644 --- a/pkg/argsbuilder/submit_sync_code.go +++ b/pkg/argsbuilder/submit_sync_code.go @@ -93,15 +93,15 @@ func (s *SubmitSyncCodeArgsBuilder) AddCommandFlags(command *cobra.Command) { s.subBuilders[name].AddCommandFlags(command) } command.Flags().StringVar(&s.args.SyncMode, "syncMode", "", "syncMode: support rsync, hdfs, git") - command.Flags().MarkDeprecated("syncMode", "please use --sync-mode instead") + _ = command.Flags().MarkDeprecated("syncMode", "please use --sync-mode instead") command.Flags().StringVar(&s.args.SyncMode, "sync-mode", "", "syncMode: support rsync, hdfs, git") // command.MarkFlagRequired("syncMode") 
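The `annotations := item.(*[]string)` and `reg := regexp.MustCompile(...)` changes in the hunks above replace a separate `var` declaration plus assignment with a single short variable declaration. A standalone sketch; the regular expression is illustrative, since the real `regexp4serviceName` constant is defined elsewhere and not shown in this patch:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Short declaration replaces the two-step form
	//   var reg *regexp.Regexp
	//   reg = regexp.MustCompile(...)
	reg := regexp.MustCompile(`^[a-z0-9_-]+$`) // illustrative pattern
	fmt.Println(reg.MatchString("my-serving-job")) // true

	// The same simplification applies when the value comes from a type assertion.
	var item interface{} = &[]string{"gpu=2"}
	annotations := item.(*[]string) // instead of: var annotations *[]string; annotations = item.(*[]string)
	fmt.Println(*annotations)
}
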
command.Flags().StringVar(&s.args.SyncSource, "syncSource", "", "syncSource: for rsync, it's like 10.88.29.56::backup/data/logoRecoTrain.zip; for git, it's like https://github.com/kubeflow/tf-operator.git") - command.Flags().MarkDeprecated("syncSource", "please use --sync-source instead") + _ = command.Flags().MarkDeprecated("syncSource", "please use --sync-source instead") command.Flags().StringVar(&s.args.SyncSource, "sync-source", "", "sync-source: for rsync, it's like 10.88.29.56::backup/data/logoRecoTrain.zip; for git, it's like https://github.com/kubeflow/tf-operator.git") command.Flags().StringVar(&s.args.SyncImage, "syncImage", "", "the docker image of syncImage") - command.Flags().MarkDeprecated("syncImage", "please use --sync-image instead") + _ = command.Flags().MarkDeprecated("syncImage", "please use --sync-image instead") command.Flags().StringVar(&s.args.SyncImage, "sync-image", "", "the docker image of syncImage") } diff --git a/pkg/argsbuilder/submit_tensorboard.go b/pkg/argsbuilder/submit_tensorboard.go index 85bf81593..023e1af7d 100644 --- a/pkg/argsbuilder/submit_tensorboard.go +++ b/pkg/argsbuilder/submit_tensorboard.go @@ -72,7 +72,7 @@ func (s *SubmitTensorboardArgsBuilder) AddCommandFlags(command *cobra.Command) { } command.Flags().BoolVar(&s.args.UseTensorboard, "tensorboard", false, "enable tensorboard") command.Flags().StringVar(&s.args.TensorboardImage, "tensorboardImage", "registry.cn-zhangjiakou.aliyuncs.com/acs/tensorflow:1.12.0-devel", "the docker image for tensorboard") - command.Flags().MarkDeprecated("tensorboardImage", "please use --tensorboard-image instead") + _ = command.Flags().MarkDeprecated("tensorboardImage", "please use --tensorboard-image instead") command.Flags().StringVar(&s.args.TensorboardImage, "tensorboard-image", "registry.cn-zhangjiakou.aliyuncs.com/acs/tensorflow:1.12.0-devel", "the docker image for tensorboard") command.Flags().StringVar(&s.args.TrainingLogdir, "logdir", "/training_logs", "the training logs dir, default is /training_logs") } diff --git a/pkg/argsbuilder/submit_tfjob.go b/pkg/argsbuilder/submit_tfjob.go index c2e16c4d0..787644bd2 100644 --- a/pkg/argsbuilder/submit_tfjob.go +++ b/pkg/argsbuilder/submit_tfjob.go @@ -91,48 +91,48 @@ func (s *SubmitTFJobArgsBuilder) AddCommandFlags(command *cobra.Command) { ttlAfterFinished time.Duration ) command.Flags().StringVar(&s.args.WorkerImage, "workerImage", "", "the docker image for tensorflow workers") - command.Flags().MarkDeprecated("workerImage", "please use --worker-image instead") + _ = command.Flags().MarkDeprecated("workerImage", "please use --worker-image instead") command.Flags().StringVar(&s.args.WorkerImage, "worker-image", "", "the docker image for tensorflow workers") command.Flags().StringVar(&s.args.PSImage, "psImage", "", "the docker image for tensorflow workers") - command.Flags().MarkDeprecated("psImage", "please use --ps-image instead") + _ = command.Flags().MarkDeprecated("psImage", "please use --ps-image instead") command.Flags().StringVar(&s.args.PSImage, "ps-image", "", "the docker image for tensorflow workers") command.Flags().IntVar(&s.args.PSCount, "ps", 0, "the number of the parameter servers.") command.Flags().IntVar(&s.args.PSPort, "psPort", 0, "the port of the parameter server.") - command.Flags().MarkDeprecated("psPort", "please use --ps-port instead") + _ = command.Flags().MarkDeprecated("psPort", "please use --ps-port instead") command.Flags().IntVar(&s.args.PSPort, "ps-port", 0, "the port of the parameter server.") 
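The wall of `_ =` assignments around the cobra flag setup exists because `MarkFlagRequired` and `MarkDeprecated` both return an error that errcheck expects to be handled; assigning it to the blank identifier records that it is deliberately ignored. A minimal sketch of the pattern with made-up flag names, not code from the patch:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{Use: "submit"}

	var name, workingDir string
	cmd.Flags().StringVar(&name, "name", "", "job name")
	cmd.Flags().StringVar(&workingDir, "workingDir", "/root", "deprecated spelling")
	cmd.Flags().StringVar(&workingDir, "working-dir", "/root", "working directory")

	// Both calls return an error; the blank identifier satisfies errcheck
	// while documenting that the error is intentionally discarded.
	_ = cmd.MarkFlagRequired("name")
	_ = cmd.Flags().MarkDeprecated("workingDir", "please use --working-dir instead")

	fmt.Println("flags registered")
}
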
command.Flags().IntVar(&s.args.WorkerPort, "workerPort", 0, "the port of the worker.") - command.Flags().MarkDeprecated("workerPort", "please use --worker-port instead") + _ = command.Flags().MarkDeprecated("workerPort", "please use --worker-port instead") command.Flags().IntVar(&s.args.WorkerPort, "worker-port", 0, "the port of the worker.") command.Flags().StringVar(&s.args.WorkerCpu, "workerCpu", "", "the cpu resource to use for the worker, like 1 for 1 core.") - command.Flags().MarkDeprecated("workerCpu", "please use --worker-cpu instead") + _ = command.Flags().MarkDeprecated("workerCpu", "please use --worker-cpu instead") command.Flags().StringVar(&s.args.WorkerCpu, "worker-cpu", "", "the cpu resource to use for the worker, like 1 for 1 core.") command.Flags().StringVar(&s.args.WorkerCpuLimit, "worker-cpu-limit", "", "the cpu resource limit to use for the worker, like 1 for 1 core.") command.Flags().StringVar(&s.args.WorkerMemory, "workerMemory", "", "the memory resource to use for the worker, like 1Gi.") - command.Flags().MarkDeprecated("workerMemory", "please use --worker-memory instead") + _ = command.Flags().MarkDeprecated("workerMemory", "please use --worker-memory instead") command.Flags().StringVar(&s.args.WorkerMemory, "worker-memory", "", "the memory resource to use for the worker, like 1Gi.") command.Flags().StringVar(&s.args.WorkerMemoryLimit, "worker-memory-limit", "", "the memory resource limit to use for the worker, like 1Gi.") command.Flags().StringVar(&s.args.PSCpu, "psCpu", "", "the cpu resource to use for the parameter servers, like 1 for 1 core.") - command.Flags().MarkDeprecated("psCpu", "please use --ps-cpu instead") + _ = command.Flags().MarkDeprecated("psCpu", "please use --ps-cpu instead") command.Flags().StringVar(&s.args.PSCpu, "ps-cpu", "", "the cpu resource to use for the parameter servers, like 1 for 1 core.") command.Flags().StringVar(&s.args.PSCpuLimit, "ps-cpu-limit", "", "the cpu resource limit to use for the parameter servers, like 1 for 1 core.") command.Flags().IntVar(&s.args.PSGpu, "ps-gpus", 0, "the gpu resource to use for the parameter servers, like 1 for 1 gpu.") command.Flags().StringVar(&s.args.PSMemory, "psMemory", "", "the memory resource to use for the parameter servers, like 1Gi.") - command.Flags().MarkDeprecated("psMemory", "please use --ps-memory instead") + _ = command.Flags().MarkDeprecated("psMemory", "please use --ps-memory instead") command.Flags().StringVar(&s.args.PSMemory, "ps-memory", "", "the memory resource to use for the parameter servers, like 1Gi.") command.Flags().StringVar(&s.args.PSMemoryLimit, "ps-memory-limit", "", "the memory resource limit to use for the parameter servers, like 1Gi.") // How to clean up Task command.Flags().StringVar(&s.args.CleanPodPolicy, "cleanTaskPolicy", "Running", "How to clean tasks after Training is done, support Running, None and All.") - command.Flags().MarkDeprecated("cleanTaskPolicy", "please use --clean-task-policy instead") + _ = command.Flags().MarkDeprecated("cleanTaskPolicy", "please use --clean-task-policy instead") command.Flags().StringVar(&s.args.CleanPodPolicy, "clean-task-policy", "Running", "How to clean tasks after Training is done, support Running, None and All.") command.Flags().DurationVar(&runningTimeout, "running-timeout", runningTimeout, "Specifies the duration since startTime during which the job can remain active before it is terminated(e.g. 
'5s', '1m', '2h22m').") @@ -143,27 +143,27 @@ func (s *SubmitTFJobArgsBuilder) AddCommandFlags(command *cobra.Command) { command.Flags().BoolVar(&s.args.UseChief, "chief", false, "enable chief, which is required for estimator.") command.Flags().BoolVar(&s.args.UseEvaluator, "evaluator", false, "enable evaluator, which is optional for estimator.") command.Flags().StringVar(&s.args.ChiefCpu, "ChiefCpu", "", "the cpu resource to use for the Chief, like 1 for 1 core.") - command.Flags().MarkDeprecated("ChiefCpu", "please use --chief-cpu instead") + _ = command.Flags().MarkDeprecated("ChiefCpu", "please use --chief-cpu instead") command.Flags().StringVar(&s.args.ChiefCpu, "chief-cpu", "", "the cpu resource to use for the Chief, like 1 for 1 core.") command.Flags().StringVar(&s.args.ChiefCpuLimit, "chief-cpu-limit", "", "the cpu resource limit to use for the Chief, like 1 for 1 core.") command.Flags().StringVar(&s.args.ChiefMemory, "ChiefMemory", "", "the memory resource to use for the Chief, like 1Gi.") - command.Flags().MarkDeprecated("ChiefMemory", "please use --chief-memory instead") + _ = command.Flags().MarkDeprecated("ChiefMemory", "please use --chief-memory instead") command.Flags().StringVar(&s.args.ChiefMemory, "chief-memory", "", "the memory resource to use for the Chief, like 1Gi.") command.Flags().StringVar(&s.args.ChiefMemoryLimit, "chief-memory-limit", "", "the memory liit resource to use for the Chief, like 1Gi.") command.Flags().StringVar(&s.args.EvaluatorCpu, "evaluatorCpu", "", "the cpu resource to use for the evaluator, like 1 for 1 core.") - command.Flags().MarkDeprecated("evaluatorCpu", "please use --evaluator-cpu instead") + _ = command.Flags().MarkDeprecated("evaluatorCpu", "please use --evaluator-cpu instead") command.Flags().StringVar(&s.args.EvaluatorCpu, "evaluator-cpu", "", "the cpu resource to use for the evaluator, like 1 for 1 core.") command.Flags().StringVar(&s.args.EvaluatorCpuLimit, "evaluator-cpu-limit", "", "the cpu resource limit to use for the evaluator, like 1 for 1 core.") command.Flags().StringVar(&s.args.EvaluatorMemory, "evaluatorMemory", "", "the memory resource to use for the evaluator, like 1Gi.") - command.Flags().MarkDeprecated("evaluatorMemory", "please use --evaluator-memory instead") + _ = command.Flags().MarkDeprecated("evaluatorMemory", "please use --evaluator-memory instead") command.Flags().StringVar(&s.args.EvaluatorMemory, "evaluator-memory", "", "the memory resource to use for the evaluator, like 1Gi.") command.Flags().StringVar(&s.args.EvaluatorMemoryLimit, "evaluator-memory-limit", "", "the memory resource limit to use for the evaluator, like 1Gi.") command.Flags().IntVar(&s.args.ChiefPort, "chiefPort", 0, "the port of the chief.") - command.Flags().MarkDeprecated("chiefPort", "please use --chief-port instead") + _ = command.Flags().MarkDeprecated("chiefPort", "please use --chief-port instead") command.Flags().IntVar(&s.args.ChiefPort, "chief-port", 0, "the port of the chief.") command.Flags().StringArrayVar(&workerSelectors, "worker-selector", []string{}, `assigning jobs with "Worker" role to some k8s particular nodes(this option would cover --selector), usage: "--worker-selector=key=value"`) command.Flags().StringArrayVar(&chiefSelectors, "chief-selector", []string{}, `assigning jobs with "Chief" role to some k8s particular nodes(this option would cover --selector), usage: "--chief-selector=key=value"`) @@ -230,11 +230,6 @@ func (s *SubmitTFJobArgsBuilder) Build() error { return nil } -func (s *SubmitTFJobArgsBuilder) 
setCommand(args []string) error { - s.args.CommonSubmitArgs.Command = strings.Join(args, " ") - return nil -} - func (s *SubmitTFJobArgsBuilder) setRuntime() error { // Get the runtime name annotations := s.args.CommonSubmitArgs.Annotations diff --git a/pkg/argsbuilder/submit_volcanojob.go b/pkg/argsbuilder/submit_volcanojob.go index 6bc7e9d99..baf6f84df 100644 --- a/pkg/argsbuilder/submit_volcanojob.go +++ b/pkg/argsbuilder/submit_volcanojob.go @@ -68,33 +68,33 @@ func (s *SubmitVolcanoJobArgsBuilder) AddCommandFlags(command *cobra.Command) { labels []string ) command.Flags().StringVar(&s.args.Name, "name", "", "assign the job name") - command.MarkFlagRequired("name") + _ = command.MarkFlagRequired("name") command.Flags().IntVar(&(s.args.MinAvailable), "minAvailable", 1, "The minimal available pods to run for this Job. default value is 1") - command.Flags().MarkDeprecated("minAvailable", "please use --min-available instead") + _ = command.Flags().MarkDeprecated("minAvailable", "please use --min-available instead") command.Flags().IntVar(&(s.args.MinAvailable), "min-available", 1, "The minimal available pods to run for this Job. default value is 1") command.Flags().StringVar(&s.args.Queue, "queue", "default", "Specifies the queue that will be used in the scheduler, default queue is used this leaves empty") command.Flags().StringVar(&s.args.SchedulerName, "schedulerName", "volcano", "Specifies the scheduler Name, default is volcano when not specified") - command.Flags().MarkDeprecated("schedulerName", "please use --scheduler-name instead") + _ = command.Flags().MarkDeprecated("schedulerName", "please use --scheduler-name instead") command.Flags().StringVar(&s.args.SchedulerName, "scheduler-name", "volcano", "Specifies the scheduler Name, default is volcano when not specified") // each task related information name,image,replica number command.Flags().StringVar(&s.args.TaskName, "taskName", "task", "the task name of volcano job, default value is task") - command.Flags().MarkDeprecated("taskName", "please use --task-name instead") + _ = command.Flags().MarkDeprecated("taskName", "please use --task-name instead") command.Flags().StringVar(&s.args.TaskName, "task-name", "task", "the task name of volcano job, default value is task") command.Flags().StringArrayVar(&s.args.TaskImages, "taskImages", []string{"ubuntu", "nginx", "busybox"}, "the docker images of different tasks of volcano job. default used 3 tasks with ubuntu,nginx and busybox images") - command.Flags().MarkDeprecated("taskImages", "please use --task-images instead") + _ = command.Flags().MarkDeprecated("taskImages", "please use --task-images instead") command.Flags().StringSliceVar(&s.args.TaskImages, "task-images", []string{"ubuntu", "nginx", "busybox"}, "the docker images of different tasks of volcano job. default used 3 tasks with ubuntu,nginx and busybox images") command.Flags().IntVar(&s.args.TaskReplicas, "taskReplicas", 1, "the task replica's number to run the distributed tasks. default value is 1") - command.Flags().MarkDeprecated("taskReplicas", "please use --task-replicas instead") + _ = command.Flags().MarkDeprecated("taskReplicas", "please use --task-replicas instead") command.Flags().IntVar(&s.args.TaskReplicas, "task-replicas", 1, "the task replica's number to run the distributed tasks. default value is 1") // cpu and memory request command.Flags().StringVar(&s.args.TaskCPU, "taskCPU", "250m", "cpu request for each task replica / pod. 
default value is 250m") - command.Flags().MarkDeprecated("taskCPU", "please use --task-cpu instead") + _ = command.Flags().MarkDeprecated("taskCPU", "please use --task-cpu instead") command.Flags().StringVar(&s.args.TaskCPU, "task-cpu", "250m", "cpu request for each task replica / pod. default value is 250m") command.Flags().StringVar(&s.args.TaskMemory, "taskMemory", "128Mi", "memory request for each task replica/pod.default value is 128Mi)") - command.Flags().MarkDeprecated("taskMemory", "please use --task-memory instead") + _ = command.Flags().MarkDeprecated("taskMemory", "please use --task-memory instead") command.Flags().StringVar(&s.args.TaskMemory, "task-memory", "128Mi", "memory request for each task replica/pod.default value is 128Mi)") command.Flags().IntVar(&s.args.TaskPort, "taskPort", 2222, "the task port number. default value is 2222") - command.Flags().MarkDeprecated("taskPort", "please use --task-port instead") + _ = command.Flags().MarkDeprecated("taskPort", "please use --task-port instead") command.Flags().IntVar(&s.args.TaskPort, "task-port", 2222, "the task port number. default value is 2222") command.Flags().StringArrayVarP(&annotations, "annotation", "a", []string{}, `the annotations, usage: "--annotation=key=value" or "--annotation key=value"`) command.Flags().StringArrayVarP(&labels, "label", "l", []string{}, "specify the label") diff --git a/pkg/argsbuilder/traffic_router_builder.go b/pkg/argsbuilder/traffic_router_builder.go index 7b985c3cc..919a2f65c 100644 --- a/pkg/argsbuilder/traffic_router_builder.go +++ b/pkg/argsbuilder/traffic_router_builder.go @@ -70,8 +70,8 @@ func (s *TrafficRouterArgsBuilder) AddCommandFlags(command *cobra.Command) { command.Flags().StringArrayVarP(&versions, "version-weight", "v", []string{}, "set the version and weight,format is: version:weight, e.g. --version-weight version1:20 --version-weight version2:40") //command.Flags().StringVar(&s.args.Versions, "versions", "", "Model versions which the traffic will be routed to, e.g. 1,2,3") //command.Flags().StringVar(&s.args.Weights, "weights", "", "Weight percentage values for each model version which the traffic will be routed to,e.g. 
70,20,10") - command.MarkFlagRequired("name") - command.MarkFlagRequired("version-weight") + _ = command.MarkFlagRequired("name") + _ = command.MarkFlagRequired("version-weight") s.AddArgValue("version-weight", &versions) } @@ -100,8 +100,7 @@ func (s *TrafficRouterArgsBuilder) Build() error { } func (s *TrafficRouterArgsBuilder) checkModelName() error { - var reg *regexp.Regexp - reg = regexp.MustCompile(regexp4serviceName) + reg := regexp.MustCompile(regexp4serviceName) matched := reg.MatchString(s.args.ServingName) if !matched { return fmt.Errorf("parameter model name should be numbers, letters, dashes, and underscores ONLY") @@ -120,7 +119,7 @@ func (s *TrafficRouterArgsBuilder) setVersionWeights() error { exist := map[string]bool{} for _, vw := range *versions { item := strings.Split(vw, ":") - if exist[item[0]] == true { + if exist[item[0]] { return fmt.Errorf("the version %v has duplicate weight", item[0]) } exist[item[0]] = true diff --git a/pkg/argsbuilder/update_serving.go b/pkg/argsbuilder/update_serving.go index e3f60691b..f77bba15c 100644 --- a/pkg/argsbuilder/update_serving.go +++ b/pkg/argsbuilder/update_serving.go @@ -276,14 +276,6 @@ func (s *UpdateServingArgsBuilder) checkName() error { return nil } -func (s *UpdateServingArgsBuilder) checkVersion() error { - if s.args.Version == "" { - return fmt.Errorf("version not set, please set it") - } - log.Debugf("version is %v", s.args.Version) - return nil -} - func (s *UpdateServingArgsBuilder) checkReplicas() error { if s.args.Replicas < 0 { return fmt.Errorf("replicas not valid, must be greater than 0") diff --git a/pkg/argsbuilder/update_serving_tensorflow.go b/pkg/argsbuilder/update_serving_tensorflow.go index 207e3f22d..a209422cc 100644 --- a/pkg/argsbuilder/update_serving_tensorflow.go +++ b/pkg/argsbuilder/update_serving_tensorflow.go @@ -15,13 +15,14 @@ package argsbuilder import ( "fmt" - log "github.com/sirupsen/logrus" "reflect" "regexp" "strings" - "github.com/kubeflow/arena/pkg/apis/types" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + + "github.com/kubeflow/arena/pkg/apis/types" ) type UpdateTensorflowServingArgsBuilder struct { @@ -99,8 +100,7 @@ func (s *UpdateTensorflowServingArgsBuilder) Build() error { func (s *UpdateTensorflowServingArgsBuilder) validateModelName() error { if s.args.ModelName != "" { - var reg *regexp.Regexp - reg = regexp.MustCompile(regexp4serviceName) + reg := regexp.MustCompile(regexp4serviceName) matched := reg.MatchString(s.args.ModelName) if !matched { return fmt.Errorf("model name should be numbers, letters, dashes, and underscores ONLY") diff --git a/pkg/commands/cron/cron_tfjob.go b/pkg/commands/cron/cron_tfjob.go index ec0039608..9190373cd 100644 --- a/pkg/commands/cron/cron_tfjob.go +++ b/pkg/commands/cron/cron_tfjob.go @@ -17,7 +17,7 @@ func NewCronTFJobCommand() *cobra.Command { Short: "Submit a cron tfjob.", Aliases: []string{"tf"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/cron/delete.go b/pkg/commands/cron/delete.go index 26ac27dd5..78be8f6c9 100644 --- a/pkg/commands/cron/delete.go +++ b/pkg/commands/cron/delete.go @@ -15,7 +15,7 @@ func NewCronDeleteCommand() *cobra.Command { Short: "Delete a cron and its associated job", Aliases: []string{"del"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd 
*cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/cron/get.go b/pkg/commands/cron/get.go index b473755d1..a9883589e 100644 --- a/pkg/commands/cron/get.go +++ b/pkg/commands/cron/get.go @@ -29,7 +29,7 @@ func NewCronGetCommand() *cobra.Command { Use: "get", Short: "get cron by name.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/cron/list.go b/pkg/commands/cron/list.go index 7deb7aec2..a69496bc6 100644 --- a/pkg/commands/cron/list.go +++ b/pkg/commands/cron/list.go @@ -30,7 +30,7 @@ func NewCronListCommand() *cobra.Command { Use: "list", Short: "list all the crons.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/cron/resume.go b/pkg/commands/cron/resume.go index c4a3eeb70..97f003886 100644 --- a/pkg/commands/cron/resume.go +++ b/pkg/commands/cron/resume.go @@ -28,7 +28,7 @@ func NewCronResumeCommand() *cobra.Command { Use: "resume", Short: "resume a suspend cron.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/cron/suspend.go b/pkg/commands/cron/suspend.go index 8080d373d..2268c77d3 100644 --- a/pkg/commands/cron/suspend.go +++ b/pkg/commands/cron/suspend.go @@ -28,7 +28,7 @@ func NewCronSuspendCommand() *cobra.Command { Use: "suspend", Short: "suspend cron.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/data/list.go b/pkg/commands/data/list.go index 8152c3877..ca56a6c07 100644 --- a/pkg/commands/data/list.go +++ b/pkg/commands/data/list.go @@ -29,7 +29,7 @@ func NewDataListCommand() *cobra.Command { Use: "list", Short: "list all the data volume.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/evaluate/delete.go b/pkg/commands/evaluate/delete.go index 74e198608..bee9bd3cc 100644 --- a/pkg/commands/evaluate/delete.go +++ b/pkg/commands/evaluate/delete.go @@ -14,7 +14,7 @@ func NewEvaluateDeleteCommand() *cobra.Command { Short: "Delete a evaluate job", Aliases: []string{"del"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/evaluate/evaluate_model.go b/pkg/commands/evaluate/evaluate_model.go index fc834ce30..e5205d04b 100644 --- a/pkg/commands/evaluate/evaluate_model.go +++ b/pkg/commands/evaluate/evaluate_model.go @@ -16,7 +16,7 @@ func NewEvaluateModelCommand() *cobra.Command { Use: "model", Short: "Submit a model evaluate job.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := 
arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/evaluate/get.go b/pkg/commands/evaluate/get.go index c5e18c592..5868b9ac9 100644 --- a/pkg/commands/evaluate/get.go +++ b/pkg/commands/evaluate/get.go @@ -14,7 +14,7 @@ func NewEvaluateGetCommand() *cobra.Command { Use: "get", Short: "get evaluate job by name.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/evaluate/list.go b/pkg/commands/evaluate/list.go index 968646f67..99cd37c98 100644 --- a/pkg/commands/evaluate/list.go +++ b/pkg/commands/evaluate/list.go @@ -15,7 +15,7 @@ func NewEvaluateListCommand() *cobra.Command { Short: "List evaluate jobs", Aliases: []string{"ls"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/model/delete.go b/pkg/commands/model/delete.go index 2283cc089..cfc507b71 100644 --- a/pkg/commands/model/delete.go +++ b/pkg/commands/model/delete.go @@ -15,7 +15,7 @@ func NewDeleteModelJobCommand() *cobra.Command { Use: "delete", Short: "Delete a model job", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/model/get.go b/pkg/commands/model/get.go index a871a4877..b04220138 100644 --- a/pkg/commands/model/get.go +++ b/pkg/commands/model/get.go @@ -16,7 +16,7 @@ func NewGetModelJobCommand() *cobra.Command { Use: "get", Short: "Get a model job", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/model/list.go b/pkg/commands/model/list.go index 6a1234510..8cea32cd9 100644 --- a/pkg/commands/model/list.go +++ b/pkg/commands/model/list.go @@ -18,7 +18,7 @@ func NewListModelJobsCommand() *cobra.Command { Short: "List all the model jobs", Aliases: []string{"ls"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/model/submit_benchmark.go b/pkg/commands/model/submit_benchmark.go index cfaa15a18..cc1734613 100644 --- a/pkg/commands/model/submit_benchmark.go +++ b/pkg/commands/model/submit_benchmark.go @@ -16,7 +16,7 @@ func NewSubmitModelBenchmarkJobCommand() *cobra.Command { Use: "benchmark", Short: "Submit a model benchmark job", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/model/submit_evaluate.go b/pkg/commands/model/submit_evaluate.go index 4661a2277..6cc0049da 100644 --- a/pkg/commands/model/submit_evaluate.go +++ b/pkg/commands/model/submit_evaluate.go @@ -16,7 +16,7 @@ func NewSubmitModelEvaluateJobCommand() *cobra.Command { Use: "evaluate", Short: "Submit a model evaluate job", PreRun: func(cmd *cobra.Command, args []string) { - 
viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/model/submit_optimize.go b/pkg/commands/model/submit_optimize.go index 10dbb7797..f447b4ac6 100644 --- a/pkg/commands/model/submit_optimize.go +++ b/pkg/commands/model/submit_optimize.go @@ -16,7 +16,7 @@ func NewSubmitModelOptimizeJobCommand() *cobra.Command { Use: "optimize", Short: "Submit a model optimize job, this is a experimental feature", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/model/submit_profile.go b/pkg/commands/model/submit_profile.go index bbdf4f435..be8b5aa78 100644 --- a/pkg/commands/model/submit_profile.go +++ b/pkg/commands/model/submit_profile.go @@ -16,7 +16,7 @@ func NewSubmitModelProfileJobCommand() *cobra.Command { Use: "profile", Short: "Submit a model profile job", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/attach.go b/pkg/commands/serving/attach.go index b59cc6192..865a4987e 100644 --- a/pkg/commands/serving/attach.go +++ b/pkg/commands/serving/attach.go @@ -33,7 +33,7 @@ func NewAttachCommand() *cobra.Command { Use: "attach JOB [-i INSTANCE] [-c CONTAINER]", Short: "Attach a serving job and execute some commands", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/serving/delete.go b/pkg/commands/serving/delete.go index 25c4bf5d5..66ce36ce7 100644 --- a/pkg/commands/serving/delete.go +++ b/pkg/commands/serving/delete.go @@ -33,7 +33,7 @@ func NewDeleteCommand() *cobra.Command { Short: "Delete a serving job and its associated instances", Aliases: []string{"del"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/serving/get.go b/pkg/commands/serving/get.go index 46924a16f..028e1c491 100644 --- a/pkg/commands/serving/get.go +++ b/pkg/commands/serving/get.go @@ -17,15 +17,14 @@ package serving import ( "fmt" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/kubeflow/arena/pkg/apis/arenaclient" "github.com/kubeflow/arena/pkg/apis/types" "github.com/kubeflow/arena/pkg/apis/utils" - "github.com/spf13/cobra" - "github.com/spf13/viper" ) -var output string - // NewGetCommand func NewGetCommand() *cobra.Command { var servingType string @@ -39,7 +38,7 @@ func NewGetCommand() *cobra.Command { Use: "get JOB [-T JOB_TYPE] [-v JOB_VERSION]", Short: "Display a serving job details", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/serving/list.go b/pkg/commands/serving/list.go index 9a8a54df5..96acc4596 100644 --- a/pkg/commands/serving/list.go +++ b/pkg/commands/serving/list.go @@ -19,7 +19,7 @@ 
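// NOTE: illustrative sketch, not part of the patch. The hunks in this region
// replace bare calls such as viper.BindPFlags, MarkFlagRequired and
// MarkDeprecated with `_ = ...` so errcheck stops reporting their ignored
// return values; runtime behaviour is unchanged. The command and flag names
// below are hypothetical.
package main

import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func newDemoCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use: "demo",
		PreRun: func(cmd *cobra.Command, args []string) {
			// BindPFlags returns an error; assigning it to the blank
			// identifier makes the intentional discard explicit.
			_ = viper.BindPFlags(cmd.Flags())
		},
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().String("name", "", "assign the job name")
	cmd.Flags().String("oldFlag", "", "deprecated spelling of --new-flag")
	// Both helpers also return errors that were previously dropped silently.
	_ = cmd.MarkFlagRequired("name")
	_ = cmd.Flags().MarkDeprecated("oldFlag", "please use --new-flag instead")
	return cmd
}

func main() { _ = newDemoCommand().Execute() }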
func NewListCommand() *cobra.Command { Short: "List all the serving jobs", Aliases: []string{"ls"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ @@ -36,7 +36,7 @@ func NewListCommand() *cobra.Command { }, } command.Flags().BoolVar(&allNamespaces, "allNamespaces", false, "show all the namespaces") - command.Flags().MarkDeprecated("allNamespaces", "please use --all-namespaces instead") + _ = command.Flags().MarkDeprecated("allNamespaces", "please use --all-namespaces instead") command.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "show all the namespaces") command.Flags().StringVarP(&format, "output", "o", "wide", "Output format. One of: json|yaml|wide") command.Flags().StringVarP(&servingType, "type", "T", "", fmt.Sprintf("The serving type, the possible option is [%v]. (optional)", utils.GetSupportServingJobTypesInfo())) diff --git a/pkg/commands/serving/logs.go b/pkg/commands/serving/logs.go index ecb0b0eb2..f640c541e 100644 --- a/pkg/commands/serving/logs.go +++ b/pkg/commands/serving/logs.go @@ -34,7 +34,7 @@ func NewLogsCommand() *cobra.Command { Short: "Print the logs of a serving job", Aliases: []string{"log"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/serving/serving_custom.go b/pkg/commands/serving/serving_custom.go index e44ca0609..00d838998 100644 --- a/pkg/commands/serving/serving_custom.go +++ b/pkg/commands/serving/serving_custom.go @@ -18,7 +18,7 @@ func NewSubmitCustomServingJobCommand() *cobra.Command { Short: "Submit custom serving to deploy and serve machine learning models.", Aliases: []string{"custom"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { //if len(args) == 0 { diff --git a/pkg/commands/serving/serving_kserve.go b/pkg/commands/serving/serving_kserve.go index 2692de867..4ad12ae78 100644 --- a/pkg/commands/serving/serving_kserve.go +++ b/pkg/commands/serving/serving_kserve.go @@ -19,7 +19,7 @@ func NewSubmitKServeJobCommand() *cobra.Command { Short: "Submit kserve to deploy and serve machine learning models.", Aliases: []string{"kserve"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/serving_kubeflow.go b/pkg/commands/serving/serving_kubeflow.go index f3806900a..947c499d9 100644 --- a/pkg/commands/serving/serving_kubeflow.go +++ b/pkg/commands/serving/serving_kubeflow.go @@ -18,7 +18,7 @@ func NewSubmitKFServingJobCommand() *cobra.Command { Short: "Submit kfserving to deploy and serve machine learning models.", Aliases: []string{"kfs", "kf"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/serving_seldon.go b/pkg/commands/serving/serving_seldon.go index f892b7a6c..e20b645ce 100644 --- 
a/pkg/commands/serving/serving_seldon.go +++ b/pkg/commands/serving/serving_seldon.go @@ -18,7 +18,7 @@ func NewSubmitSeldonServingJobCommand() *cobra.Command { Short: "Submit seldon to deploy and serve machine learning models.", Aliases: []string{"seldon"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/serving_tensorflow.go b/pkg/commands/serving/serving_tensorflow.go index 0b4ffd5bc..c7760ed0e 100644 --- a/pkg/commands/serving/serving_tensorflow.go +++ b/pkg/commands/serving/serving_tensorflow.go @@ -18,7 +18,7 @@ func NewSubmitTFServingJobCommand() *cobra.Command { Short: "Submit tensorflow serving job to deploy and serve machine learning models.", Aliases: []string{"tf"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/serving_tensorrt.go b/pkg/commands/serving/serving_tensorrt.go index 726dc44fb..ad4ed2a8e 100644 --- a/pkg/commands/serving/serving_tensorrt.go +++ b/pkg/commands/serving/serving_tensorrt.go @@ -18,7 +18,7 @@ func NewSubmitTRTServingJobCommand() *cobra.Command { Short: "Submit tensorRT inference serving job to deploy and serve machine learning models.", Aliases: []string{"trt"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/serving/serving_triton.go b/pkg/commands/serving/serving_triton.go index b1b316998..c85375c53 100644 --- a/pkg/commands/serving/serving_triton.go +++ b/pkg/commands/serving/serving_triton.go @@ -18,7 +18,7 @@ func NewSubmitTritonServingJobCommand() *cobra.Command { Short: "Submit nvidia triton server job to deploy and serve machine learning models.", Aliases: []string{"triton"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/traffic_router_split.go b/pkg/commands/serving/traffic_router_split.go index d250130f8..f5d9b49f1 100644 --- a/pkg/commands/serving/traffic_router_split.go +++ b/pkg/commands/serving/traffic_router_split.go @@ -31,7 +31,7 @@ func NewTrafficRouterSplitCommand() *cobra.Command { Short: "Adjust traffic routing dynamically for tfserving jobs", Aliases: []string{"trs", "traffic-router", "traffic-router-split", "traffic-shift", "traffic-shifting"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/update_custom.go b/pkg/commands/serving/update_custom.go index aaba2c085..85707aa45 100644 --- a/pkg/commands/serving/update_custom.go +++ b/pkg/commands/serving/update_custom.go @@ -18,7 +18,7 @@ func NewUpdateCustomCommand() *cobra.Command { Use: "custom", Short: "Update a custom serving job and its associated instances", PreRun: func(cmd *cobra.Command, args []string) { - 
viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/update_kserve.go b/pkg/commands/serving/update_kserve.go index ebf7f2c31..20b1e2aa4 100644 --- a/pkg/commands/serving/update_kserve.go +++ b/pkg/commands/serving/update_kserve.go @@ -19,7 +19,7 @@ func NewUpdateKServeCommand() *cobra.Command { Use: "kserve", Short: "Update a kserve serving job", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/update_tensorflow.go b/pkg/commands/serving/update_tensorflow.go index b73591993..7cee64d99 100644 --- a/pkg/commands/serving/update_tensorflow.go +++ b/pkg/commands/serving/update_tensorflow.go @@ -18,7 +18,7 @@ func NewUpdateTensorflowCommand() *cobra.Command { Use: "tensorflow", Short: "Update a tensorflow serving job and its associated instances", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/serving/update_triton.go b/pkg/commands/serving/update_triton.go index ed360ce9f..2e7d6a24e 100644 --- a/pkg/commands/serving/update_triton.go +++ b/pkg/commands/serving/update_triton.go @@ -18,7 +18,7 @@ func NewUpdateTritonCommand() *cobra.Command { Use: "triton", Short: "Update a triton serving job and its associated instances", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/top/job.go b/pkg/commands/top/job.go index 16cea2b66..5bfb7f8b8 100644 --- a/pkg/commands/top/job.go +++ b/pkg/commands/top/job.go @@ -22,7 +22,7 @@ func NewTopJobCommand() *cobra.Command { Use: "job", Short: "Display Resource (GPU) usage of jobs.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { isDaemonMode := false @@ -50,7 +50,7 @@ func NewTopJobCommand() *cobra.Command { }, } command.Flags().BoolVar(&allNamespaces, "allNamespaces", false, "show all the namespaces") - command.Flags().MarkDeprecated("allNamespaces", "please use --all-namespaces instead") + _ = command.Flags().MarkDeprecated("allNamespaces", "please use --all-namespaces instead") command.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "show all the namespaces") command.Flags().StringVarP(&format, "output", "o", "wide", "Output format. 
One of: json|yaml|wide") command.Flags().BoolVarP(¬Stop, "refresh", "r", false, "Display continuously") diff --git a/pkg/commands/top/node.go b/pkg/commands/top/node.go index 305b34511..f1bf5da7e 100644 --- a/pkg/commands/top/node.go +++ b/pkg/commands/top/node.go @@ -39,7 +39,7 @@ func NewTopNodeCommand() *cobra.Command { Use: "node", Short: "Display Resource (GPU) usage of nodes.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { isDaemonMode := false @@ -48,7 +48,7 @@ func NewTopNodeCommand() *cobra.Command { } now := time.Now() defer func() { - log.Debugf("execute time of top nodes: %v\n", time.Now().Sub(now)) + log.Debugf("execute time of top nodes: %v\n", time.Since(now)) }() client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ Kubeconfig: viper.GetString("config"), diff --git a/pkg/commands/training/attach.go b/pkg/commands/training/attach.go index ea3ab9081..2190fbbdc 100644 --- a/pkg/commands/training/attach.go +++ b/pkg/commands/training/attach.go @@ -32,7 +32,7 @@ func NewAttachCommand() *cobra.Command { Use: "attach JOB [-i INSTANCE] [-c CONTAINER]", Short: "Attach a training job and execute some commands", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/delete.go b/pkg/commands/training/delete.go index 5e8fdd534..ef0119209 100644 --- a/pkg/commands/training/delete.go +++ b/pkg/commands/training/delete.go @@ -17,15 +17,12 @@ package training import ( "fmt" - "github.com/kubeflow/arena/pkg/apis/arenaclient" - "github.com/kubeflow/arena/pkg/apis/types" - "github.com/kubeflow/arena/pkg/apis/utils" "github.com/spf13/cobra" "github.com/spf13/viper" -) -var ( - trainingType string + "github.com/kubeflow/arena/pkg/apis/arenaclient" + "github.com/kubeflow/arena/pkg/apis/types" + "github.com/kubeflow/arena/pkg/apis/utils" ) // NewDeleteCommand @@ -36,7 +33,7 @@ func NewDeleteCommand() *cobra.Command { Short: "Delete a training job and its associated instances", Aliases: []string{"del"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/get.go b/pkg/commands/training/get.go index eaf8e2793..daa988aba 100644 --- a/pkg/commands/training/get.go +++ b/pkg/commands/training/get.go @@ -18,17 +18,14 @@ import ( "fmt" "time" - "github.com/kubeflow/arena/pkg/apis/arenaclient" - "github.com/kubeflow/arena/pkg/apis/types" - "github.com/kubeflow/arena/pkg/apis/utils" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" -) - -var output string -var dashboardURL string + "github.com/kubeflow/arena/pkg/apis/arenaclient" + "github.com/kubeflow/arena/pkg/apis/types" + "github.com/kubeflow/arena/pkg/apis/utils" +) // NewGetCommand func NewGetCommand() *cobra.Command { @@ -40,7 +37,7 @@ func NewGetCommand() *cobra.Command { Use: "get JOB [-T JOB_TYPE]", Short: "Display a training job details", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { @@ -49,7 +46,7 @@ func NewGetCommand() *cobra.Command { } now := time.Now() defer func() { - 
log.Debugf("execute time of get training job: %v", time.Now().Sub(now)) + log.Debugf("execute time of get training job: %v", time.Since(now)) }() name := args[0] client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/training/list.go b/pkg/commands/training/list.go index 369c6b794..cfbc3263d 100644 --- a/pkg/commands/training/list.go +++ b/pkg/commands/training/list.go @@ -21,12 +21,12 @@ func NewListCommand() *cobra.Command { Short: "List all the training jobs", Aliases: []string{"ls"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { now := time.Now() defer func() { - log.Debugf("execute time of listing training jobs: %v\n", time.Now().Sub(now)) + log.Debugf("execute time of listing training jobs: %v\n", time.Since(now)) }() client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ Kubeconfig: viper.GetString("config"), @@ -43,7 +43,7 @@ func NewListCommand() *cobra.Command { } command.Flags().StringVarP(&jobType, "type", "T", "", fmt.Sprintf("The training type to list, the possible option is %v. (optional)", utils.GetSupportTrainingJobTypesInfo())) command.Flags().BoolVar(&allNamespaces, "allNamespaces", false, "show all the namespaces") - command.Flags().MarkDeprecated("allNamespaces", "please use --all-namespaces instead") + _ = command.Flags().MarkDeprecated("allNamespaces", "please use --all-namespaces instead") command.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "show all the namespaces") command.Flags().StringVarP(&format, "output", "o", "wide", "Output format. One of: json|yaml|wide") return command diff --git a/pkg/commands/training/logs.go b/pkg/commands/training/logs.go index 4e521d6ab..e580163b3 100644 --- a/pkg/commands/training/logs.go +++ b/pkg/commands/training/logs.go @@ -33,7 +33,7 @@ func NewLogsCommand() *cobra.Command { Short: "Print the logs of a training job", Aliases: []string{"log"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/logviewer.go b/pkg/commands/training/logviewer.go index 87d7709f5..43b914007 100644 --- a/pkg/commands/training/logviewer.go +++ b/pkg/commands/training/logviewer.go @@ -30,7 +30,7 @@ func NewLogViewerCommand() *cobra.Command { Use: "logviewer JOB [-T JOB_TYPE]", Short: "Display Log Viewer URL of a training job", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/prune.go b/pkg/commands/training/prune.go index 641c94ded..b97475128 100644 --- a/pkg/commands/training/prune.go +++ b/pkg/commands/training/prune.go @@ -30,7 +30,7 @@ func NewPruneCommand() *cobra.Command { Use: "prune history job", Short: "Prune the history jobs", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/training/scalein_etjob.go b/pkg/commands/training/scalein_etjob.go index de9494067..e429e0090 100644 --- a/pkg/commands/training/scalein_etjob.go +++ b/pkg/commands/training/scalein_etjob.go @@ -17,7 +17,7 @@ func 
NewScaleInETJobCommand() *cobra.Command { Short: "Scale in a elastic training job", Aliases: []string{"et"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/training/scaleout_etjob.go b/pkg/commands/training/scaleout_etjob.go index a0a4c6417..5e376bf22 100644 --- a/pkg/commands/training/scaleout_etjob.go +++ b/pkg/commands/training/scaleout_etjob.go @@ -17,7 +17,7 @@ func NewScaleOutETJobCommand() *cobra.Command { Short: "Scale out a elastic training job", Aliases: []string{"et"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/training/submit_deepspeedjob.go b/pkg/commands/training/submit_deepspeedjob.go index 81f57e49e..ce7717f26 100644 --- a/pkg/commands/training/submit_deepspeedjob.go +++ b/pkg/commands/training/submit_deepspeedjob.go @@ -32,7 +32,7 @@ func NewSubmitDeepSpeedJobCommand() *cobra.Command { Short: "Submit DeepSpeedJob as training job.", Aliases: []string{"dp"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/submit_etjob.go b/pkg/commands/training/submit_etjob.go index 7be1ea28b..56715e292 100644 --- a/pkg/commands/training/submit_etjob.go +++ b/pkg/commands/training/submit_etjob.go @@ -17,7 +17,7 @@ func NewSubmitETJobCommand() *cobra.Command { Short: "Submit ETJob as training job.", Aliases: []string{"et"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/submit_horovodjob.go b/pkg/commands/training/submit_horovodjob.go index d9806d129..f18608650 100644 --- a/pkg/commands/training/submit_horovodjob.go +++ b/pkg/commands/training/submit_horovodjob.go @@ -18,7 +18,7 @@ func NewSubmitHorovodJobCommand() *cobra.Command { Short: "Submit horovodjob as training job.", Aliases: []string{"horovod", "hj"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/submit_mpijob.go b/pkg/commands/training/submit_mpijob.go index 3b97ff831..e5e8445e3 100644 --- a/pkg/commands/training/submit_mpijob.go +++ b/pkg/commands/training/submit_mpijob.go @@ -31,7 +31,7 @@ func NewSubmitMPIJobCommand() *cobra.Command { Short: "Submit MPIjob as training job.", Aliases: []string{"mpi", "mj"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/submit_pytorchjob.go b/pkg/commands/training/submit_pytorchjob.go index e22b8a324..9b298b9ad 100644 --- a/pkg/commands/training/submit_pytorchjob.go +++ b/pkg/commands/training/submit_pytorchjob.go @@ -17,7 +17,7 @@ func NewSubmitPytorchJobCommand() *cobra.Command { Short: "Submit PyTorchJob as training job.", Aliases: 
[]string{"pytorch"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/submit_sparkjob.go b/pkg/commands/training/submit_sparkjob.go index 7283741a8..26abb7840 100644 --- a/pkg/commands/training/submit_sparkjob.go +++ b/pkg/commands/training/submit_sparkjob.go @@ -17,7 +17,7 @@ func NewSubmitSparkJobCommand() *cobra.Command { Short: "Submit a common spark application job.", Aliases: []string{"spark"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/training/submit_tfjob.go b/pkg/commands/training/submit_tfjob.go index 6a3208a0c..f89f63164 100644 --- a/pkg/commands/training/submit_tfjob.go +++ b/pkg/commands/training/submit_tfjob.go @@ -17,7 +17,7 @@ func NewSubmitTFJobCommand() *cobra.Command { Short: "Submit a TFJob as training job.", Aliases: []string{"tf"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/pkg/commands/training/submit_volcanojob.go b/pkg/commands/training/submit_volcanojob.go index 3400ec48a..4623cea00 100644 --- a/pkg/commands/training/submit_volcanojob.go +++ b/pkg/commands/training/submit_volcanojob.go @@ -17,7 +17,7 @@ func NewVolcanoJobCommand() *cobra.Command { Short: "Submit a Volcano job.", Aliases: []string{"vj"}, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { client, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/commands/version.go b/pkg/commands/version.go index 87e9c1fb0..6cd42f1a3 100644 --- a/pkg/commands/version.go +++ b/pkg/commands/version.go @@ -28,7 +28,7 @@ func NewVersionCmd(cliName string) *cobra.Command { ) versionCmd := cobra.Command{ Use: "version", - Short: fmt.Sprintf("Print version information"), + Short: "Print version information", Run: func(cmd *cobra.Command, args []string) { version := arena.GetVersion() fmt.Printf("%s: %s\n", cliName, version) diff --git a/pkg/commands/whoami.go b/pkg/commands/whoami.go index 86a936463..a8f97c58c 100644 --- a/pkg/commands/whoami.go +++ b/pkg/commands/whoami.go @@ -17,7 +17,7 @@ func NewWhoamiCommand() *cobra.Command { Short: "Display current user information.", Long: "Display current user information.", PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { _, err := arenaclient.NewArenaClient(types.ArenaClientArgs{ diff --git a/pkg/cron/util.go b/pkg/cron/util.go index d1134e102..c0ccb7f8c 100644 --- a/pkg/cron/util.go +++ b/pkg/cron/util.go @@ -3,7 +3,6 @@ package cron import ( "fmt" "io" - "k8s.io/apimachinery/pkg/runtime/schema" "strings" "time" ) @@ -12,21 +11,11 @@ const ( formatLayout = "2006-01-02T15:04:05Z" ) -var gvr = schema.GroupVersionResource{ - Group: "apps.kubedl.io", - Version: "v1alpha1", - Resource: "crons", -} - func printLine(w io.Writer, fields ...string) { buffer := strings.Join(fields, "\t") fmt.Fprintln(w, buffer) } -func parseTime(strTime string) (time.Time, 
error) { - return time.Parse(formatLayout, strTime) -} - func formatTime(t time.Time) string { return t.Format(formatLayout) } diff --git a/pkg/datahouse/list.go b/pkg/datahouse/list.go index 60f72c008..5278a8004 100644 --- a/pkg/datahouse/list.go +++ b/pkg/datahouse/list.go @@ -132,5 +132,5 @@ func translateTimestamp(timestamp metav1.Time) string { if timestamp.IsZero() { return "" } - return util.ShortHumanDuration(time.Now().Sub(timestamp.Time)) + return util.ShortHumanDuration(time.Since(timestamp.Time)) } diff --git a/pkg/evaluate/util.go b/pkg/evaluate/util.go index 6ccbc92ac..047542a21 100644 --- a/pkg/evaluate/util.go +++ b/pkg/evaluate/util.go @@ -19,10 +19,6 @@ func printLine(w io.Writer, fields ...string) { fmt.Fprintln(w, buffer) } -func parseTime(strTime string) (time.Time, error) { - return time.Parse(formatLayout, strTime) -} - func formatTime(t time.Time) string { return t.Format(formatLayout) } diff --git a/pkg/k8saccesser/k8s_accesser.go b/pkg/k8saccesser/k8s_accesser.go index 8f4c49b38..ea6054be6 100644 --- a/pkg/k8saccesser/k8s_accesser.go +++ b/pkg/k8saccesser/k8s_accesser.go @@ -16,6 +16,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -43,13 +44,13 @@ var accesser *k8sResourceAccesser var once sync.Once func init() { - tfv1.AddToScheme(scheme.Scheme) - v1alpha1.AddToScheme(scheme.Scheme) - v1alpha12.AddToScheme(scheme.Scheme) - pytorch_v1.AddToScheme(scheme.Scheme) - spark_v1beta2.AddToScheme(scheme.Scheme) - volcano_v1alpha1.AddToScheme(scheme.Scheme) - cron_v1alpha1.AddToScheme(scheme.Scheme) + utilruntime.Must(tfv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme.Scheme)) + utilruntime.Must(v1alpha12.AddToScheme(scheme.Scheme)) + utilruntime.Must(pytorch_v1.AddToScheme(scheme.Scheme)) + utilruntime.Must(spark_v1beta2.AddToScheme(scheme.Scheme)) + utilruntime.Must(volcano_v1alpha1.AddToScheme(scheme.Scheme)) + utilruntime.Must(cron_v1alpha1.AddToScheme(scheme.Scheme)) } func InitK8sResourceAccesser(config *rest.Config, clientset *kubernetes.Clientset, isDaemonMode bool) error { @@ -88,7 +89,7 @@ func NewK8sResourceAccesser(config *rest.Config, clientset *kubernetes.Clientset log.Errorf("failed to create cacheClient, reason: %v", err) return nil, err } - cacheClient.IndexField(context.TODO(), &v1.Pod{}, "spec.nodeName", func(o client.Object) []string { + _ = cacheClient.IndexField(context.TODO(), &v1.Pod{}, "spec.nodeName", func(o client.Object) []string { if pod, ok := o.(*v1.Pod); ok { return []string{pod.Spec.NodeName} } @@ -879,13 +880,3 @@ func parseFieldSelector(item string) (fields.Selector, error) { } return selector, nil } - -func createClientListOptions(labelSelector labels.Selector, fieldSelector fields.Selector) *client.ListOptions { - options := &client.ListOptions{ - LabelSelector: labelSelector, - } - if !fieldSelector.Empty() { - options.FieldSelector = fieldSelector - } - return options -} diff --git a/pkg/model/get.go b/pkg/model/get.go index 20a9eacde..f1f6f514d 100644 --- a/pkg/model/get.go +++ b/pkg/model/get.go @@ -91,6 +91,6 @@ func PrintModelJob(job ModelJob, format types.FormatStyle) { jobInfo.Age, strings.Join(lines, "\n"), ) - fmt.Fprintf(w, output) + fmt.Fprint(w, output) w.Flush() } diff --git a/pkg/model/model.go b/pkg/model/model.go index c86840c60..5fdf0312d 100644 --- a/pkg/model/model.go 
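// NOTE: illustrative sketch, not part of the patch. Several hunks above swap
// time.Now().Sub(t) for time.Since(t); the two expressions return the same
// Duration, so only readability changes (staticcheck S1012).
package main

import (
	"fmt"
	"time"
)

func main() {
	created := time.Now().Add(-90 * time.Second)

	// Before the lint fix:
	age1 := time.Now().Sub(created)
	// After the lint fix (preferred form):
	age2 := time.Since(created)

	fmt.Println(age1.Round(time.Second), age2.Round(time.Second)) // both ~1m30s
}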
+++ b/pkg/model/model.go @@ -138,7 +138,7 @@ func (m *modelJob) Job() *batchv1.Job { } func (m *modelJob) Age() time.Duration { - return time.Now().Sub(m.job.ObjectMeta.CreationTimestamp.Time) + return time.Since(m.job.ObjectMeta.CreationTimestamp.Time) } func (m *modelJob) Duration() time.Duration { @@ -232,7 +232,7 @@ func (m *modelJob) Instances() []types.ModelJobInstance { var instances []types.ModelJobInstance for index, pod := range m.pods { status, totalContainers, restart, readyContainer := utils.DefinePodPhaseStatus(*pod) - age := util.ShortHumanDuration(time.Now().Sub(pod.ObjectMeta.CreationTimestamp.Time)) + age := util.ShortHumanDuration(time.Since(pod.ObjectMeta.CreationTimestamp.Time)) gpuMemory := utils.GPUMemoryCountInPod(pod) gpuCore := utils.GPUCoreCountInPod(pod) gpus := getPodGPUs(pod, gpuMemory, index) diff --git a/pkg/operators/tf-operator/apis/tensorflow/v1alpha1/defaults.go b/pkg/operators/tf-operator/apis/tensorflow/v1alpha1/defaults.go index 7f7e39460..ba7f1fc22 100644 --- a/pkg/operators/tf-operator/apis/tensorflow/v1alpha1/defaults.go +++ b/pkg/operators/tf-operator/apis/tensorflow/v1alpha1/defaults.go @@ -15,7 +15,7 @@ package v1alpha1 import ( - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "k8s.io/apimachinery/pkg/runtime" ) diff --git a/pkg/operators/tf-operator/apis/tensorflow/v1alpha2/defaults.go b/pkg/operators/tf-operator/apis/tensorflow/v1alpha2/defaults.go index 5e21bdd09..84123dc51 100644 --- a/pkg/operators/tf-operator/apis/tensorflow/v1alpha2/defaults.go +++ b/pkg/operators/tf-operator/apis/tensorflow/v1alpha2/defaults.go @@ -77,7 +77,7 @@ func setTypeNamesToCamelCase(tfJob *TFJob) { // E.g. from ps to PS; from WORKER to Worker. func setTypeNameToCamelCase(tfJob *TFJob, typ TFReplicaType) { for t := range tfJob.Spec.TFReplicaSpecs { - if strings.ToLower(string(t)) == strings.ToLower(string(typ)) && t != typ { + if strings.EqualFold(string(t), string(typ)) && t != typ { spec := tfJob.Spec.TFReplicaSpecs[t] delete(tfJob.Spec.TFReplicaSpecs, t) tfJob.Spec.TFReplicaSpecs[typ] = spec diff --git a/pkg/operators/tf-operator/apis/tensorflow/validation/validation_test.go b/pkg/operators/tf-operator/apis/tensorflow/validation/validation_test.go index 2fe7949bb..f2c78c502 100644 --- a/pkg/operators/tf-operator/apis/tensorflow/validation/validation_test.go +++ b/pkg/operators/tf-operator/apis/tensorflow/validation/validation_test.go @@ -17,13 +17,13 @@ package validation import ( "testing" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" + "k8s.io/api/core/v1" + commonv1 "github.com/kubeflow/arena/pkg/operators/tf-operator/apis/common/v1" commonv1beta2 "github.com/kubeflow/arena/pkg/operators/tf-operator/apis/common/v1beta2" tfv1 "github.com/kubeflow/arena/pkg/operators/tf-operator/apis/tensorflow/v1" tfv1beta2 "github.com/kubeflow/arena/pkg/operators/tf-operator/apis/tensorflow/v1beta2" - - "k8s.io/api/core/v1" ) func TestValidateBetaTwoTFJobSpec(t *testing.T) { diff --git a/pkg/operators/volcano-operator/client/clientset/versioned/scheme/register.go b/pkg/operators/volcano-operator/client/clientset/versioned/scheme/register.go index a904a3fce..cedf28ca9 100644 --- a/pkg/operators/volcano-operator/client/clientset/versioned/scheme/register.go +++ b/pkg/operators/volcano-operator/client/clientset/versioned/scheme/register.go @@ -17,11 +17,13 @@ limitations under the License. 
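// NOTE: illustrative sketch, not part of the patch. The scheme-registration
// hunks (k8s_accesser.go init and the clientset register.go below) wrap
// AddToScheme calls in utilruntime.Must, which panics on a non-nil error
// instead of silently discarding it: errcheck is satisfied and a broken
// scheme fails fast at startup.
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

var scheme = runtime.NewScheme()

func init() {
	// Previously the error from AddToScheme was ignored.
	utilruntime.Must(corev1.AddToScheme(scheme))
}

func main() {}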
package scheme import ( - batchv1alpha1 "github.com/kubeflow/arena/pkg/operators/volcano-operator/apis/batch/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + batchv1alpha1 "github.com/kubeflow/arena/pkg/operators/volcano-operator/apis/batch/v1alpha1" ) var Scheme = runtime.NewScheme() @@ -48,6 +50,5 @@ func init() { // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. func AddToScheme(scheme *runtime.Scheme) { - batchv1alpha1.AddToScheme(scheme) - + utilruntime.Must(batchv1alpha1.AddToScheme(scheme)) } diff --git a/pkg/podexec/exec.go b/pkg/podexec/exec.go index 67d13127a..8531956c1 100644 --- a/pkg/podexec/exec.go +++ b/pkg/podexec/exec.go @@ -103,9 +103,8 @@ type ExecOptions struct { ParentCommandName string EnableSuggestedCmdUsage bool - Builder func() *resource.Builder - ExecutablePodFn polymorphichelpers.AttachablePodForObjectFunc - restClientGetter genericclioptions.RESTClientGetter + Builder func() *resource.Builder + ExecutablePodFn polymorphichelpers.AttachablePodForObjectFunc Pod *corev1.Pod Executor RemoteExecutor diff --git a/pkg/podlogs/logger.go b/pkg/podlogs/logger.go index c957dc1ab..ce6035c95 100644 --- a/pkg/podlogs/logger.go +++ b/pkg/podlogs/logger.go @@ -53,11 +53,15 @@ func (p *PodLogger) AcceptLogs() (int, error) { if err := p.getLogs(func(reader io.ReadCloser) { defer p.Writer.Close() defer reader.Close() - io.Copy(p.Writer, reader) + if _, err := io.Copy(p.Writer, reader); err != nil { + log.Debugf("get logs failed, err: %s", err) + } }); err != nil { return 1, err } - io.Copy(p.WriterCloser, p.Reader) + if _, err := io.Copy(p.WriterCloser, p.Reader); err != nil { + log.Debugf("get logs failed, err: %s", err) + } return 0, nil } diff --git a/pkg/prometheus/prom.go b/pkg/prometheus/prom.go index 67de88975..db089ab33 100644 --- a/pkg/prometheus/prom.go +++ b/pkg/prometheus/prom.go @@ -63,20 +63,6 @@ func GetPodsGpuInfo(client *kubernetes.Clientset, podNames []string) (JobGpuMetr return *jobMetric, nil } -func getMetricAverage(metrics []types.GpuMetricInfo) float64 { - var result float64 - result = 0 - for _, metric := range metrics { - v, _ := strconv.ParseFloat(metric.Value, 64) - result = result + v - } - if result == 0 { - return result - } - result = result / float64(len(metrics)) - return result -} - // {__name__=~"nvidia_gpu_duty_cycle|nvidia_gpu_memory_used_bytes|nvidia_gpu_memory_total_bytes", pod_name=~"tf-distributed-test-ps-0|tf-distributed-test-worker-0"} func GetNodeGPUMetrics(client *kubernetes.Clientset, nodeNames []string) (map[string]types.NodeGpuMetric, error) { diff --git a/pkg/prometheus/query.go b/pkg/prometheus/query.go index 0603315e2..e1f3ad486 100644 --- a/pkg/prometheus/query.go +++ b/pkg/prometheus/query.go @@ -213,21 +213,6 @@ func queryPrometheusMetricsProxyByAPIServer(client *kubernetes.Clientset, query return gpuMetric, nil } -func prometheusInstalled(client *kubernetes.Clientset) bool { - server := getPrometheusServer(client) - if server == nil { - return false - } - log.Debugf("get prometheus service: %v", server.Service) - return true - //gpuDeviceMetrics, _ := QueryMetricByPrometheus(client, server, "nvidia_gpu_num_devices") - //return 
len(gpuDeviceMetrics) > 0 -} - -func GetPrometheusServer(client *kubernetes.Clientset) *types.PrometheusServer { - return getPrometheusServer(client) -} - // GetPrometheusServer get the matched prometheus server from the supported prometheus server func getPrometheusServer(client *kubernetes.Clientset) *types.PrometheusServer { for _, s := range types.SUPPORT_PROMETHEUS_SERVERS { diff --git a/pkg/serving/get.go b/pkg/serving/get.go index 86ed91384..d704fa0f0 100644 --- a/pkg/serving/get.go +++ b/pkg/serving/get.go @@ -92,7 +92,7 @@ func validateJobs(jobs []ServingJob, name string) error { knownJobs := []ServingJob{} unknownJobs := []ServingJob{} for _, s := range jobs { - labels := map[string]string{} + var labels map[string]string if ksjob, ok := s.(*kserveJob); ok { labels = ksjob.inferenceService.Labels } else { @@ -200,6 +200,6 @@ func PrintServingJob(job ServingJob, format types.FormatStyle) { strings.Join(ports, ","), strings.Join(lines, "\n"), ) - fmt.Fprintf(w, output) + fmt.Fprint(w, output) w.Flush() } diff --git a/pkg/serving/list.go b/pkg/serving/list.go index a8a712c99..eb1894260 100644 --- a/pkg/serving/list.go +++ b/pkg/serving/list.go @@ -8,19 +8,16 @@ import ( "sync" "text/tabwriter" - "github.com/kubeflow/arena/pkg/apis/types" - "github.com/kubeflow/arena/pkg/apis/utils" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" -) -var ( - errUnknownServingJobType = fmt.Errorf("unknown serving types,only support: %v", utils.GetServingJobTypes()) + "github.com/kubeflow/arena/pkg/apis/types" + "github.com/kubeflow/arena/pkg/apis/utils" ) func ListServingJobs(namespace string, allNamespace bool, servingType types.ServingJobType) ([]ServingJob, error) { if servingType == types.UnknownServingJob { - return nil, fmt.Errorf("Unknown serving job type,arena only supports: [%s]", utils.GetSupportServingJobTypesInfo()) + return nil, fmt.Errorf("unknown serving job type,arena only supports: [%s]", utils.GetSupportServingJobTypesInfo()) } processers := GetAllProcesser() if servingType != types.AllServingJob { @@ -58,7 +55,7 @@ func ListServingJobs(namespace string, allNamespace bool, servingType types.Serv if noPrivileges { item := fmt.Sprintf("namespace %v", namespace) if allNamespace { - item = fmt.Sprintf("all namespaces") + item = "all namespaces" } return nil, fmt.Errorf("the user has no privileges to list the serving jobs in %v", item) } diff --git a/pkg/serving/serving.go b/pkg/serving/serving.go index ee6dba5dd..5218f1be0 100644 --- a/pkg/serving/serving.go +++ b/pkg/serving/serving.go @@ -1,7 +1,6 @@ package serving import ( - "errors" "fmt" "math" "strings" @@ -35,10 +34,6 @@ const ( istioGatewayHTTPsPortName = "https" ) -var ( - errServingJobNotFound = errors.New("serving job not found") -) - var processers map[types.ServingJobType]Processer var once sync.Once @@ -153,7 +148,7 @@ func (s *servingJob) IPAddress() string { } func (s *servingJob) Age() time.Duration { - return time.Now().Sub(s.deployment.ObjectMeta.CreationTimestamp.Time) + return time.Since(s.deployment.ObjectMeta.CreationTimestamp.Time) } func (s *servingJob) StartTime() *metav1.Time { @@ -275,7 +270,7 @@ func (s *servingJob) Instances() []types.ServingInstance { instances := []types.ServingInstance{} for index, pod := range s.pods { status, totalContainers, restart, readyContainer := utils.DefinePodPhaseStatus(*pod) - age := util.ShortHumanDuration(time.Now().Sub(pod.ObjectMeta.CreationTimestamp.Time)) + age := util.ShortHumanDuration(time.Since(pod.ObjectMeta.CreationTimestamp.Time)) gpuMemory := 
utils.GPUMemoryCountInPod(pod) gpuCore := utils.GPUCoreCountInPod(pod) gpus := getPodGPUs(pod, gpuMemory, index) diff --git a/pkg/serving/serving_kserve.go b/pkg/serving/serving_kserve.go index 51dfd67b6..8bdc6514c 100644 --- a/pkg/serving/serving_kserve.go +++ b/pkg/serving/serving_kserve.go @@ -168,7 +168,7 @@ func (s *kserveJob) Uid() string { } func (s *kserveJob) Age() time.Duration { - return time.Now().Sub(s.inferenceService.ObjectMeta.CreationTimestamp.Time) + return time.Since(s.inferenceService.ObjectMeta.CreationTimestamp.Time) } func (s *kserveJob) StartTime() *metav1.Time { diff --git a/pkg/serving/serving_seldon.go b/pkg/serving/serving_seldon.go index 7d2487ad7..f11665049 100644 --- a/pkg/serving/serving_seldon.go +++ b/pkg/serving/serving_seldon.go @@ -204,9 +204,7 @@ func (p *SeldonServingProcesser) FilterServingJobs(namespace string, allNamespac continue } - for _, svc := range services { - filterServices = append(filterServices, svc) - } + filterServices = append(filterServices, services...) job := &servingJob{ name: deployment.Labels[servingNameLabelKey], diff --git a/pkg/serving/traffic_router_split.go b/pkg/serving/traffic_router_split.go index 0498f75ea..812258023 100644 --- a/pkg/serving/traffic_router_split.go +++ b/pkg/serving/traffic_router_split.go @@ -19,8 +19,6 @@ import ( "encoding/json" "strings" - "github.com/kubeflow/arena/pkg/apis/config" - "github.com/kubeflow/arena/pkg/apis/types" log "github.com/sirupsen/logrus" istiov1alpha3 "istio.io/api/networking/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,11 +26,9 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/rest" -) -var ( - modelPathSeparator = ":" - regexp4serviceName = "^[a-z0-9A-Z_-]+$" + "github.com/kubeflow/arena/pkg/apis/config" + "github.com/kubeflow/arena/pkg/apis/types" ) func RunTrafficRouterSplit(namespace string, args *types.TrafficRouterSplitArgs) (err error) { @@ -48,8 +44,14 @@ func RunTrafficRouterSplit(namespace string, args *types.TrafficRouterSplitArgs) } log.Debugf("serviceName: %s", preprocessObject.ServiceName) jsonDestinationRule, err := json.Marshal(preprocessObject.DestinationRule) + if err != nil { + return err + } log.Debugf("destination rule: %s", jsonDestinationRule) jsonVirtualService, err := json.Marshal(preprocessObject.VirtualService) + if err != nil { + return err + } log.Debugf("virtual service: %s", jsonVirtualService) virtualServiceName := preprocessObject.ServiceName log.Debugf("virtualServiceName:%s", virtualServiceName) diff --git a/pkg/serving/update.go b/pkg/serving/update.go index 411c89deb..0f2b36f83 100644 --- a/pkg/serving/update.go +++ b/pkg/serving/update.go @@ -328,13 +328,10 @@ func findAndBuildDeployment(args *types.CommonUpdateServingArgs) (*appsv1.Deploy switch args.Type { case types.TFServingJob: suffix = "tensorflow-serving" - break case types.TritonServingJob: suffix = "tritoninferenceserver" - break case types.CustomServingJob: suffix = "custom-serving" - break default: return nil, fmt.Errorf("invalid serving job type [%s]", args.Type) } @@ -361,23 +358,17 @@ func findAndBuildDeployment(args *types.CommonUpdateServingArgs) (*appsv1.Deploy if args.GPUCount > 0 { resourceLimits[ResourceGPU] = resource.MustParse(strconv.Itoa(args.GPUCount)) - if _, ok := resourceLimits[ResourceGPUMemory]; ok { - delete(resourceLimits, ResourceGPUMemory) - } + delete(resourceLimits, ResourceGPUMemory) } if args.GPUMemory > 0 { resourceLimits[ResourceGPUMemory] = 
resource.MustParse(strconv.Itoa(args.GPUMemory)) - if _, ok := resourceLimits[ResourceGPU]; ok { - delete(resourceLimits, ResourceGPU) - } + delete(resourceLimits, ResourceGPU) } if args.GPUCore > 0 && args.GPUCore%5 == 0 { resourceLimits[ResourceGPUCore] = resource.MustParse(strconv.Itoa(args.GPUCore)) - if _, ok := resourceLimits[ResourceGPU]; ok { - delete(resourceLimits, ResourceGPU) - } + delete(resourceLimits, ResourceGPU) } if args.Cpu != "" { @@ -515,21 +506,15 @@ func setInferenceServiceForFrameworkModel(args *types.UpdateKServeArgs, inferenc } if args.GPUCount > 0 { resourceLimits[ResourceGPU] = resource.MustParse(strconv.Itoa(args.GPUCount)) - if _, ok := resourceLimits[ResourceGPUMemory]; ok { - delete(resourceLimits, ResourceGPUMemory) - } + delete(resourceLimits, ResourceGPUMemory) } if args.GPUMemory > 0 { resourceLimits[ResourceGPUMemory] = resource.MustParse(strconv.Itoa(args.GPUMemory)) - if _, ok := resourceLimits[ResourceGPU]; ok { - delete(resourceLimits, ResourceGPU) - } + delete(resourceLimits, ResourceGPU) } if args.GPUCore > 0 && args.GPUCore%5 == 0 { resourceLimits[ResourceGPUCore] = resource.MustParse(strconv.Itoa(args.GPUCore)) - if _, ok := resourceLimits[ResourceGPU]; ok { - delete(resourceLimits, ResourceGPU) - } + delete(resourceLimits, ResourceGPU) } if args.Cpu != "" { resourceLimits[v1.ResourceCPU] = resource.MustParse(args.Cpu) @@ -585,21 +570,15 @@ func setInferenceServiceForCustomModel(args *types.UpdateKServeArgs, inferenceSe } if args.GPUCount > 0 { resourceLimits[ResourceGPU] = resource.MustParse(strconv.Itoa(args.GPUCount)) - if _, ok := resourceLimits[ResourceGPUMemory]; ok { - delete(resourceLimits, ResourceGPUMemory) - } + delete(resourceLimits, ResourceGPUMemory) } if args.GPUMemory > 0 { resourceLimits[ResourceGPUMemory] = resource.MustParse(strconv.Itoa(args.GPUMemory)) - if _, ok := resourceLimits[ResourceGPU]; ok { - delete(resourceLimits, ResourceGPU) - } + delete(resourceLimits, ResourceGPU) } if args.GPUCore > 0 && args.GPUCore%5 == 0 { resourceLimits[ResourceGPUCore] = resource.MustParse(strconv.Itoa(args.GPUCore)) - if _, ok := resourceLimits[ResourceGPU]; ok { - delete(resourceLimits, ResourceGPU) - } + delete(resourceLimits, ResourceGPU) } if args.Cpu != "" { resourceLimits[v1.ResourceCPU] = resource.MustParse(args.Cpu) diff --git a/pkg/serving/util.go b/pkg/serving/util.go index d648aea60..6b968d404 100644 --- a/pkg/serving/util.go +++ b/pkg/serving/util.go @@ -21,7 +21,7 @@ func moreThanOneJobHelpInfo(infos []ServingJob) string { header := fmt.Sprintf("There is %d jobs have been found:", len(infos)) tableHeader := "NAME\tTYPE\tVERSION" lines := []string{tableHeader} - footer := fmt.Sprintf("please use '--type' or '--version' to filter.") + footer := "please use '--type' or '--version' to filter." for _, info := range infos { line := fmt.Sprintf("%s\t%s\t%s", info.Name(), @@ -36,7 +36,7 @@ func moreThanOneJobHelpInfo(infos []ServingJob) string { func moreThanOneInstanceHelpInfo(instances []types.ServingInstance) string { header := fmt.Sprintf("There is %d instances have been found:", len(instances)) lines := []string{} - footer := fmt.Sprintf("please use '-i' or '--instance' to filter.") + footer := "please use '-i' or '--instance' to filter." 
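// NOTE: illustrative sketch, not part of the patch. The update.go hunks above
// drop the `if _, ok := resourceLimits[key]; ok { ... }` guard around delete:
// deleting a key that is absent from a map is already a no-op, so the guard
// is redundant. The resource keys below are placeholders for illustration.
package main

import "fmt"

func main() {
	limits := map[string]string{
		"nvidia.com/gpu": "1",
	}

	// Before: presence was checked first.
	if _, ok := limits["aliyun.com/gpu-mem"]; ok {
		delete(limits, "aliyun.com/gpu-mem")
	}
	// After: the check is unnecessary; delete is safe on missing keys.
	delete(limits, "aliyun.com/gpu-mem")

	fmt.Println(limits)
}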
for _, i := range instances { lines = append(lines, fmt.Sprintf("%v", i.Name)) } @@ -60,7 +60,7 @@ func ValidateJobsBeforeSubmiting(jobs []ServingJob, name string) error { knownJobs := []ServingJob{} unknownJobs := []ServingJob{} for _, s := range jobs { - labels := map[string]string{} + var labels map[string]string if ksjob, ok := s.(*kserveJob); ok { labels = ksjob.inferenceService.Labels } else { diff --git a/pkg/topnode/gpuexclusive.go b/pkg/topnode/gpuexclusive.go index 8e5acb05f..5822c0468 100644 --- a/pkg/topnode/gpuexclusive.go +++ b/pkg/topnode/gpuexclusive.go @@ -5,10 +5,11 @@ import ( "strings" "text/tabwriter" - "github.com/kubeflow/arena/pkg/apis/types" - "github.com/kubeflow/arena/pkg/apis/utils" v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" + + "github.com/kubeflow/arena/pkg/apis/types" + "github.com/kubeflow/arena/pkg/apis/utils" ) var GPUExclusiveNodeDescription = ` @@ -86,18 +87,6 @@ func (g *gpuexclusive) getAllocatedGPUs() float64 { return float64(allocatedGPUs) } -func (g *gpuexclusive) getTotalGPUMemory() float64 { - totalGPUMemory := float64(0) - for _, metric := range g.gpuMetrics { - totalGPUMemory += metric.GpuMemoryTotal - } - // if gpu metric is enable,return the value given by prometheus - if totalGPUMemory != 0 { - return totalGPUMemory - } - return float64(0) -} - func (g *gpuexclusive) getAllocatedGPUMemory() float64 { if !g.gpuMetricsIsEnabled() { return float64(0) @@ -117,34 +106,6 @@ func (g *gpuexclusive) getAllocatedGPUMemory() float64 { return allocatedGPUMemory } -func (g *gpuexclusive) getUsedGPUMemory() float64 { - // can not to detect gpu memory if no gpu metrics data - if !g.gpuMetricsIsEnabled() { - return float64(0) - } - usedGPUMemory := float64(0) - for _, gpuMetric := range g.gpuMetrics { - usedGPUMemory += gpuMetric.GpuMemoryUsed - } - return usedGPUMemory -} - -func (g *gpuexclusive) getDutyCycle() float64 { - if !g.gpuMetricsIsEnabled() { - return float64(0) - } - dutyCycle := float64(0) - totalGPUs := float64(0) - for _, gpuMetric := range g.gpuMetrics { - totalGPUs += float64(1) - dutyCycle += gpuMetric.GpuDutyCycle - } - if totalGPUs == 0 { - return float64(0) - } - return dutyCycle / totalGPUs -} - func (g *gpuexclusive) getUnhealthyGPUs() float64 { totalGPUs := g.getTotalGPUs() allocatableGPUs, ok := g.node.Status.Allocatable[v1.ResourceName(types.NvidiaGPUResourceName)] @@ -157,13 +118,6 @@ func (g *gpuexclusive) getUnhealthyGPUs() float64 { return totalGPUs - float64(allocatableGPUs.Value()) } -func (g *gpuexclusive) getTotalGPUMemoryOfDevice(id string) float64 { - if metric, ok := g.gpuMetrics[id]; ok { - return metric.GpuMemoryTotal - } - return 0 -} - func (g *gpuexclusive) convert2NodeInfo() types.GPUExclusiveNodeInfo { podInfos := []types.GPUExclusivePodInfo{} metrics := []*types.AdvancedGpuMetric{} diff --git a/pkg/topnode/gpushare.go b/pkg/topnode/gpushare.go index 2427c0a56..cdec162ca 100644 --- a/pkg/topnode/gpushare.go +++ b/pkg/topnode/gpushare.go @@ -44,17 +44,6 @@ GPU Summary: Used GPU Memory: %.1f GiB ` -var gpushareWithGpuCoreSummary = ` -GPU Summary: - Total GPUs: %v - Allocated GPUs: %v - Unhealthy GPUs: %v - Total GPU Memory: %.1f GiB - Allocated GPU Memory: %.1f GiB - Used GPU Memory: %d - Total GPU Core: %d -` - type gpushare struct { node *v1.Node pods []*v1.Pod @@ -77,10 +66,6 @@ func NewGPUShareNode(client *kubernetes.Clientset, node *v1.Node, index int, arg }, nil } -func (g *gpushare) gpuMetricsIsEnabled() bool { - return len(g.gpuMetrics) != 0 -} - func (g *gpushare) getTotalGPUs() float64 { if 
len(g.gpuMetrics) != 0 { return float64(len(g.gpuMetrics)) @@ -163,33 +148,6 @@ func (g *gpushare) getAllocatedGPUCore() int64 { return allocatedGPUCore } -func (g *gpushare) getUsedGPUMemory() float64 { - if !g.gpuMetricsIsEnabled() { - return float64(0) - } - usedGPUMemory := float64(0) - for _, gpuMetric := range g.gpuMetrics { - usedGPUMemory += gpuMetric.GpuMemoryUsed - } - return usedGPUMemory -} - -func (g *gpushare) getDutyCycle() float64 { - if !g.gpuMetricsIsEnabled() { - return float64(0) - } - dutyCycle := float64(0) - totalGPUs := float64(0) - for _, gpuMetric := range g.gpuMetrics { - totalGPUs += float64(1) - dutyCycle += gpuMetric.GpuDutyCycle - } - if totalGPUs == 0 { - return float64(0) - } - return dutyCycle / totalGPUs -} - func (g *gpushare) getUnhealthyGPUs() float64 { totalGPUs := g.getTotalGPUs() totalGPUMemory, ok := g.node.Status.Capacity[v1.ResourceName(types.GPUShareResourceName)] @@ -676,7 +634,7 @@ func displayGPUShareNodesCustomSummary(w *tabwriter.Writer, nodes []Node) { if nodeInfo.TotalGPUCore > 0 { items = append(items, fmt.Sprintf("%d/%d", nodeInfo.AllocatedGPUCore, nodeInfo.TotalGPUCore)) } else { - items = append(items, fmt.Sprintf("__")) + items = append(items, "__") } if isUnhealthy { items = append(items, fmt.Sprintf("%v", nodeInfo.UnhealthyGPUs)) diff --git a/pkg/topnode/gputopology.go b/pkg/topnode/gputopology.go index 85ed12742..1cfced425 100644 --- a/pkg/topnode/gputopology.go +++ b/pkg/topnode/gputopology.go @@ -3,6 +3,7 @@ package topnode import ( "encoding/json" "fmt" + log "github.com/sirupsen/logrus" "strings" "text/tabwriter" @@ -99,68 +100,6 @@ func (g *gputopo) getAllocatedGPUs() float64 { return float64(allocatedGPUs) } -func (g *gputopo) getTotalGPUMemory() float64 { - totalGPUMemory := float64(0) - for _, metric := range g.gpuMetrics { - totalGPUMemory += metric.GpuMemoryTotal - } - // if gpu metric is enable,return the value given by prometheus - if totalGPUMemory != 0 { - return totalGPUMemory - } - return float64(0) -} - -func (g *gputopo) getAllocatedGPUMemory() float64 { - if !g.gpuMetricsIsEnabled() { - return float64(0) - } - allocatedGPUMemory := float64(0) - allocatedGPUs := map[string]bool{} - for _, pod := range g.pods { - if utils.IsCompletedPod(pod) { - continue - } - allocation := utils.GetPodGPUTopologyAllocation(pod) - for _, gpuId := range allocation { - allocatedGPUs[gpuId] = true - } - } - for key, metric := range g.gpuMetrics { - if allocatedGPUs[key] { - allocatedGPUMemory += metric.GpuMemoryTotal - } - } - return utils.DataUnitTransfer("GiB", "bytes", allocatedGPUMemory) -} - -func (g *gputopo) getUsedGPUMemory() float64 { - if !g.gpuMetricsIsEnabled() { - return float64(0) - } - usedGPUMemory := float64(0) - for _, gpuMetric := range g.gpuMetrics { - usedGPUMemory += gpuMetric.GpuMemoryUsed - } - return usedGPUMemory -} - -func (g *gputopo) getDutyCycle() float64 { - if !g.gpuMetricsIsEnabled() { - return float64(0) - } - dutyCycle := float64(0) - totalGPUs := float64(0) - for _, gpuMetric := range g.gpuMetrics { - totalGPUs += float64(1) - dutyCycle += gpuMetric.GpuDutyCycle - } - if totalGPUs == 0 { - return float64(0) - } - return dutyCycle / totalGPUs -} - func (g *gputopo) getUnhealthyGPUs() float64 { totalGPUs := g.getTotalGPUs() allocatableGPUs, ok := g.node.Status.Allocatable[v1.ResourceName(types.AliyunGPUResourceName)] @@ -173,13 +112,6 @@ func (g *gputopo) getUnhealthyGPUs() float64 { return totalGPUs - float64(allocatableGPUs.Value()) } -func (g *gputopo) getTotalGPUMemoryOfDevice(id string) float64 
{ - if metric, ok := g.gpuMetrics[id]; ok { - return metric.GpuMemoryTotal - } - return 0 -} - func (g *gputopo) convert2NodeInfo() types.GPUTopologyNodeInfo { podInfos := []types.GPUTopologyPodInfo{} // 1.initilize the common node information @@ -201,7 +133,9 @@ func (g *gputopo) convert2NodeInfo() types.GPUTopologyNodeInfo { deviceMap := map[string]types.GPUTopologyNodeDevice{} if val, ok := g.configmap.Data["devices"]; ok { devicesFromConfigmap := map[string]string{} - json.Unmarshal([]byte(val), &devicesFromConfigmap) + if err := json.Unmarshal([]byte(val), &devicesFromConfigmap); err != nil { + log.Debugf("get devices from configmap failed, err: %s", err) + } for id, health := range devicesFromConfigmap { healthy := false if health == "Healthy" { @@ -249,10 +183,10 @@ func (g *gputopo) convert2NodeInfo() types.GPUTopologyNodeInfo { BandwidthMatrix: [][]float32{}, } if val, ok := g.configmap.Data["linkType"]; ok { - json.Unmarshal([]byte(val), &topology.LinkMatrix) + _ = json.Unmarshal([]byte(val), &topology.LinkMatrix) } if val, ok := g.configmap.Data["bandwith"]; ok { - json.Unmarshal([]byte(val), &topology.BandwidthMatrix) + _ = json.Unmarshal([]byte(val), &topology.BandwidthMatrix) } gpuTopologyNodeInfo.PodInfos = podInfos gpuTopologyNodeInfo.GPUTopology = topology @@ -288,22 +222,20 @@ func (g *gputopo) WideFormat() string { if len(nodeInfo.GPUTopology.LinkMatrix) != 0 { header := []string{" "} lines = append(lines, "LinkTypeMatrix:") - for index, _ := range nodeInfo.Devices { + for index := range nodeInfo.Devices { header = append(header, fmt.Sprintf("GPU%v", index)) } lines = append(lines, strings.Join(header, "\t")) for row, links := range nodeInfo.GPUTopology.LinkMatrix { linkLine := []string{fmt.Sprintf(" GPU%v", row)} - for _, link := range links { - linkLine = append(linkLine, link) - } + linkLine = append(linkLine, links...) lines = append(lines, strings.Join(linkLine, "\t")) } } if len(nodeInfo.GPUTopology.BandwidthMatrix) != 0 { header := []string{" "} lines = append(lines, "BandwidthMatrix:") - for index, _ := range nodeInfo.Devices { + for index := range nodeInfo.Devices { header = append(header, fmt.Sprintf("GPU%v", index)) } lines = append(lines, strings.Join(header, "\t")) @@ -432,6 +364,7 @@ func (g *gputopo) displayDeviceInfoUnderMetrics(lines []string, nodeInfo types.G utils.DataUnitTransfer("bytes", "GiB", totalAllocatedGPUMemory), utils.DataUnitTransfer("bytes", "GiB", totalUsedGPUMemory), )) + lines = append(lines, deviceLines...) return lines } diff --git a/pkg/topnode/normal.go b/pkg/topnode/normal.go index 79137ab7b..50d7b1517 100644 --- a/pkg/topnode/normal.go +++ b/pkg/topnode/normal.go @@ -5,9 +5,10 @@ import ( "strings" "text/tabwriter" - "github.com/kubeflow/arena/pkg/apis/types" v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" + + "github.com/kubeflow/arena/pkg/apis/types" ) var NormalNodeDescription = ` @@ -126,7 +127,7 @@ func displayNormalNodeSummary(w *tabwriter.Writer, nodes []Node, isUnhealthy, sh } } if isUnhealthy { - items = append(items, fmt.Sprintf("0")) + items = append(items, "0") } PrintLine(w, items...) 
} diff --git a/pkg/training/const.go b/pkg/training/const.go index f151cb05a..d641e18a9 100644 --- a/pkg/training/const.go +++ b/pkg/training/const.go @@ -14,33 +14,13 @@ package training -import "errors" - const ( - CHART_PKG_LOC = "CHARTREPO" - // GPUResourceName is the extended name of the GPU resource since v1.8 + // NVIDIAGPUResourceName is the extended name of the GPU resource since v1.8 // this uses the device plugin mechanism NVIDIAGPUResourceName = "nvidia.com/gpu" - // GPUShareResourceName is the gpushare resource name - GPUShareResourceName = "aliyun.com/gpu-mem" DeprecatedNVIDIAGPUResourceName = "alpha.kubernetes.io/nvidia-gpu" - masterLabelRole = "node-role.kubernetes.io/master" - - gangSchdName = "kube-batchd" - - // labelNodeRolePrefix is a label prefix for node roles - // It's copied over to here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 - labelNodeRolePrefix = "node-role.kubernetes.io/" - - // nodeLabelRole specifies the role of a node - nodeLabelRole = "kubernetes.io/role" - - aliyunENIAnnotation = "k8s.aliyun.com/eni" - - requestGPUsOfJobAnnoKey = "requestGPUsOfJobOwner" - spotInstanceJobStatusAnnotation = "job-supervisor.kube-ai.io/job-status" // TrainingReplicaTypeLabel training-operator replica type label @@ -48,7 +28,3 @@ const ( // TrainingReplicaIndexLabel training-operator replica index label TrainingReplicaIndexLabel = "training.kubeflow.org/replica-index" ) - -var ( - errNotFoundOperator = errors.New("the server could not find the requested resource") -) diff --git a/pkg/training/dashboard_helper.go b/pkg/training/dashboard_helper.go index 5f47e516b..1b5bbebd2 100644 --- a/pkg/training/dashboard_helper.go +++ b/pkg/training/dashboard_helper.go @@ -132,29 +132,28 @@ func dashboardFromNodePort(k8sclient kubernetes.Interface, namespace string, nam // address := svc.Status.LoadBalancer.Ingress[0].IP // port := svc.Spec.Ports[0].Port // return fmt.Sprintf("%s:%d", address, port), nil - for _, port := range svc.Spec.Ports { - nodePort := port.NodePort - // Get node address - node := corev1.Node{} - findReadyNode := false + port := svc.Spec.Ports[0] + nodePort := port.NodePort + // Get node address + node := corev1.Node{} + findReadyNode := false - for _, item := range nodes { - for _, condition := range item.Status.Conditions { - if condition.Type == "Ready" { - if condition.Status == "True" { - node = *item - findReadyNode = true - break - } + for _, item := range nodes { + for _, condition := range item.Status.Conditions { + if condition.Type == "Ready" { + if condition.Status == "True" { + node = *item + findReadyNode = true + break } } } - if !findReadyNode { - return "", fmt.Errorf("Failed to find the ready node for exporting dashboard.") - } - address := node.Status.Addresses[0].Address - return fmt.Sprintf("%s:%d", address, nodePort), nil } + if !findReadyNode { + return "", fmt.Errorf("Failed to find the ready node for exporting dashboard.") + } + address := node.Status.Addresses[0].Address + return fmt.Sprintf("%s:%d", address, nodePort), nil } } diff --git a/pkg/training/get.go b/pkg/training/get.go index 114e148e6..6c85697f8 100644 --- a/pkg/training/get.go +++ b/pkg/training/get.go @@ -16,35 +16,27 @@ package training import ( "context" + "encoding/json" "fmt" + "os" "strconv" + "strings" "sync" + "text/tabwriter" + "time" log "github.com/sirupsen/logrus" - - "encoding/json" - "os" - "strings" - "text/tabwriter" + "gopkg.in/yaml.v2" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/client-go/kubernetes" "github.com/kubeflow/arena/pkg/apis/config" "github.com/kubeflow/arena/pkg/apis/types" "github.com/kubeflow/arena/pkg/apis/utils" - "github.com/kubeflow/arena/pkg/util" - yaml "gopkg.in/yaml.v2" - - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" ) -var ( - errJobNotFoundMessage = "Not found training job %s in namespace %s,please use 'arena submit' to create it." - errGetMsg = "Failed to get the training job %s, but the trainer config is found, please clean it by using 'arena delete %s %v'." -) var getJobTemplate = ` Name: %v Status: %v @@ -165,14 +157,14 @@ func PrintTrainingJob(job TrainingJob, format string, showEvents bool, showGPUs if err != nil { fmt.Printf("Failed due to %v", err) } else { - fmt.Printf(string(outBytes)) + fmt.Print(string(outBytes)) } case "yaml": outBytes, err := yaml.Marshal(BuildJobInfo(job, showGPUs, services, nodes)) if err != nil { fmt.Printf("Failed due to %v", err) } else { - fmt.Printf(string(outBytes)) + fmt.Print(string(outBytes)) } case "wide", "": printSingleJobHelper(BuildJobInfo(job, showGPUs, services, nodes), job.Resources(), showEvents, showGPUs) @@ -278,7 +270,7 @@ func printEvents(lines []string, namespace string, resouces []Resource) []string lines = append(lines, fmt.Sprintf(" %v\t%v\t%v\t%v", instanceName, event.Type, - util.ShortHumanDuration(time.Now().Sub(event.CreationTimestamp.Time)), + util.ShortHumanDuration(time.Since(event.CreationTimestamp.Time)), fmt.Sprintf("[%s] %s", event.Reason, event.Message), )) } diff --git a/pkg/training/get_advanced.go b/pkg/training/get_advanced.go index 249aef5c5..bd4170271 100644 --- a/pkg/training/get_advanced.go +++ b/pkg/training/get_advanced.go @@ -22,7 +22,6 @@ import ( "github.com/kubeflow/arena/pkg/apis/utils" "github.com/kubeflow/arena/pkg/prometheus" "github.com/kubeflow/arena/pkg/util/kubeclient" - "github.com/kubeflow/arena/pkg/util/kubectl" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" @@ -60,14 +59,6 @@ func getTrainingTypes(name, namespace string) (cms []string, err error) { return cms, nil } -/** -* check if the training config exist - */ -func isTrainingConfigExist(name, trainingType, namespace string) bool { - configName := fmt.Sprintf("%s-%s", name, trainingType) - return kubectl.CheckAppConfigMap(configName, namespace) -} - /** * BuildTrainingJobInfo returns types.TrainingJobInfo */ @@ -86,6 +77,9 @@ func BuildJobInfo(job TrainingJob, showGPUs bool, services []*v1.Service, nodes instances := []types.TrainingJobInstance{} if showGPUs { jobGPUMetric, err = GetJobGpuMetric(config.GetArenaConfiger().GetClientSet(), job) + if err != nil { + log.Debugf("get job gpu metric failed, err: %s", err) + } } for _, pod := range job.AllPods() { isChief := false diff --git a/pkg/training/gpu.go b/pkg/training/gpu.go index ed23b7b58..25485b029 100644 --- a/pkg/training/gpu.go +++ b/pkg/training/gpu.go @@ -17,54 +17,12 @@ package training import ( "strconv" - "github.com/kubeflow/arena/pkg/apis/types" - "github.com/kubeflow/arena/pkg/apis/utils" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" -) - -// filter out the pods with GPU -func gpuPods(pods []v1.Pod) (podsWithGPU []v1.Pod) { - for _, pod := range pods { - if gpuInPod(pod) > 0 { - podsWithGPU = append(podsWithGPU, pod) - } - } - return podsWithGPU -} - -// The way to get total GPU Count of Node: nvidia.com/gpu -func totalGpuInNode(node v1.Node) int64 { - val, ok := 
node.Status.Capacity[NVIDIAGPUResourceName] - - if !ok { - return gpuInNodeDeprecated(node) - } - - return val.Value() -} - -// The way to get allocatble GPU Count of Node: nvidia.com/gpu -func allocatableGpuInNode(node v1.Node) int64 { - val, ok := node.Status.Allocatable[NVIDIAGPUResourceName] - if !ok { - return gpuInNodeDeprecated(node) - } - - return val.Value() -} - -// The way to get GPU Count of Node: alpha.kubernetes.io/nvidia-gpu -func gpuInNodeDeprecated(node v1.Node) int64 { - val, ok := node.Status.Allocatable[DeprecatedNVIDIAGPUResourceName] - - if !ok { - return 0 - } - - return val.Value() -} + "github.com/kubeflow/arena/pkg/apis/types" + "github.com/kubeflow/arena/pkg/apis/utils" +) func gpuInPod(pod v1.Pod) (gpuCount int64) { containers := pod.Spec.Containers diff --git a/pkg/training/list.go b/pkg/training/list.go index df301ab56..8f99aec16 100644 --- a/pkg/training/list.go +++ b/pkg/training/list.go @@ -45,7 +45,7 @@ func ListTrainingJobs(namespace string, allNamespaces bool, jobType types.Traini if strings.Contains(err.Error(), "forbidden: User") { item := fmt.Sprintf("namespace %v", namespace) if allNamespaces { - item = fmt.Sprintf("all namespaces") + item = "all namespaces" } log.Debugf("the user has no privileges to list the %v in %v,reason: %v", trainerType, item, err) noPrivileges = true @@ -63,7 +63,7 @@ func ListTrainingJobs(namespace string, allNamespaces bool, jobType types.Traini if noPrivileges { item := fmt.Sprintf("namespace %v", namespace) if allNamespaces { - item = fmt.Sprintf("all namespaces") + item = "all namespaces" } return nil, fmt.Errorf("the user has no privileges to list the training jobs in %v", item) } diff --git a/pkg/training/logs.go b/pkg/training/logs.go index 43384d970..09b019501 100644 --- a/pkg/training/logs.go +++ b/pkg/training/logs.go @@ -68,18 +68,10 @@ func AcceptJobLog(jobName string, trainingType types.TrainingJobType, args *type return err } -func getTrainingJobTypes() []string { - jobTypes := []string{} - for _, trainingType := range utils.GetTrainingJobTypes() { - jobTypes = append(jobTypes, string(trainingType)) - } - return jobTypes -} - func getInstanceName(job TrainingJob) (string, error) { pods := job.AllPods() // if not found pods,return an error - if pods == nil || len(pods) == 0 { + if len(pods) == 0 { return "", fmt.Errorf("not found instances of the job %v", job.Name()) } // if the job has only one pod,return its' name @@ -97,7 +89,7 @@ func getInstanceName(job TrainingJob) (string, error) { func moreThanOneInstanceHelpInfo(pods []*v1.Pod) string { header := fmt.Sprintf("There is %d instances have been found:", len(pods)) lines := []string{} - footer := fmt.Sprintf("please use '-i' or '--instance' to filter.") + footer := "please use '-i' or '--instance' to filter." 
for _, p := range pods { lines = append(lines, fmt.Sprintf("%v", p.Name)) } diff --git a/pkg/training/pod_helper.go b/pkg/training/pod_helper.go index 12a49bbe5..a31689724 100644 --- a/pkg/training/pod_helper.go +++ b/pkg/training/pod_helper.go @@ -15,193 +15,12 @@ package training import ( - "context" - "fmt" "sort" log "github.com/sirupsen/logrus" - batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/kubernetes" ) -// acquire all active pods from all namespaces -func acquireAllActivePods(client *kubernetes.Clientset) ([]v1.Pod, error) { - allPods := []v1.Pod{} - - fieldSelector, err := fields.ParseSelector("status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) - if err != nil { - return allPods, err - } - nodeNonTerminatedPodsList, err := client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{FieldSelector: fieldSelector.String()}) - if err != nil { - return allPods, err - } - - for _, pod := range nodeNonTerminatedPodsList.Items { - allPods = append(allPods, pod) - } - return allPods, nil -} - -func acquireAllPods(client *kubernetes.Clientset, namespace string, allNamespaces bool) ([]v1.Pod, error) { - allPods := []v1.Pod{} - ns := namespace - if allNamespaces { - ns = metav1.NamespaceAll - } - podList, err := client.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return allPods, err - } - for _, pod := range podList.Items { - allPods = append(allPods, pod) - } - return allPods, nil -} - -func acquireAllJobs(client *kubernetes.Clientset, namespace string, allNamespaces bool) ([]batchv1.Job, error) { - allJobs := []batchv1.Job{} - ns := namespace - if allNamespaces { - ns = metav1.NamespaceAll - } - jobList, err := client.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return allJobs, err - } - for _, job := range jobList.Items { - allJobs = append(allJobs, job) - } - return allJobs, nil -} - -func getPodNameFromJob(client kubernetes.Interface, namespace, name string) (podName string, err error) { - pods, err := jobPods(client, namespace, name) - if err != nil { - return "", err - } - if len(pods) == 0 { - return "", fmt.Errorf("Failed to find the pod for job %s, maybe you need to set --namespace", name) - } - - for _, pod := range pods { - meta := pod.ObjectMeta - isJob := false - owners := meta.OwnerReferences - for _, owner := range owners { - if owner.Kind == "Job" { - isJob = true - break - } - } - if isJob { - return pod.Name, nil - } - } - return "", fmt.Errorf("getPodNameFromJob: Failed to find the pod of job") -} - -// Get the latest pod from the Kubernetes job -func getPodFromJob(client kubernetes.Interface, jobName, namespace, releaseName string) (jobPod v1.Pod, err error) { - pods, err := jobPods(client, namespace, releaseName) - if err != nil { - return jobPod, err - } - if len(pods) == 0 { - return jobPod, fmt.Errorf("getPodFromJob: Failed to find the pod for job %s, maybe you need to set --namespace", jobName) - } - var latest metav1.Time - for _, pod := range pods { - meta := pod.ObjectMeta - isJob := false - owners := meta.OwnerReferences - for _, owner := range owners { - if owner.Kind != "Job" { - continue - } - isJob = true - break - } - if !isJob { - continue - } - // return pod, nil - if jobPod.Name == "" { - latest = pod.CreationTimestamp - jobPod = pod - log.Debugf("set pod %s as first jobpod, and it's time is %v", jobPod.Name, 
jobPod.CreationTimestamp) - continue - } - log.Debugf("current jobpod %s , and it's time is %v", jobPod.Name, latest) - log.Debugf("candidate jobpod %s , and it's time is %v", pod.Name, pod.CreationTimestamp) - current := pod.CreationTimestamp - if !latest.Before(¤t) { - log.Debugf("no replace") - continue - } - jobPod = pod - latest = current - log.Debugf("replace") - } - if jobPod.Name == "" { - err = fmt.Errorf("Not able to job with release %s in pods %v", releaseName, pods) - } - return jobPod, err -} - -// List all the pods which associate to the arena jobs, including the pods in the statefulset and the job -func listAllPodsForJob(client kubernetes.Interface, jobName, namespace string, releaseName string) (pods []v1.Pod, err error) { - podList, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ - TypeMeta: metav1.TypeMeta{ - Kind: "ListOptions", - APIVersion: "v1", - }, LabelSelector: fmt.Sprintf("release=%s", releaseName), - }) - if err != nil { - return nil, err - } - pods = []v1.Pod{} - for _, item := range podList.Items { - meta := item.ObjectMeta - isJob := false - owners := meta.OwnerReferences - for _, owner := range owners { - if owner.Kind != "Job" { - continue - } - isJob = true - log.Debugf("find job pod %v, break", item) - break - } - if !isJob { - pods = append(pods, item) - log.Debugf("add pod %v to pods", item) - } - } - jobPod, err := getPodFromJob(client, jobName, namespace, releaseName) - if err != nil { - return nil, err - } - pods = append(pods, jobPod) - return pods, err -} - -func jobPods(client kubernetes.Interface, namespace string, releaseName string) ([]v1.Pod, error) { - podList, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ - TypeMeta: metav1.TypeMeta{ - Kind: "ListOptions", - APIVersion: "v1", - }, LabelSelector: fmt.Sprintf("release=%s", releaseName), - }) - if err != nil { - return nil, err - } - return podList.Items, err -} - // Sort the pod condition by time. type SortPodConditionByLastTransitionTime []v1.PodCondition diff --git a/pkg/training/prune.go b/pkg/training/prune.go index d8c71a954..ce8fedbac 100644 --- a/pkg/training/prune.go +++ b/pkg/training/prune.go @@ -4,13 +4,10 @@ import ( "fmt" "time" - "github.com/kubeflow/arena/pkg/util" log "github.com/sirupsen/logrus" -) -type PruneArgs struct { - since time.Duration -} + "github.com/kubeflow/arena/pkg/util" +) func PruneTrainingJobs(namespace string, allNamespaces bool, since time.Duration) error { jobs := []TrainingJob{} diff --git a/pkg/training/top_job.go b/pkg/training/top_job.go index f39ec2f83..2bc57c0a6 100644 --- a/pkg/training/top_job.go +++ b/pkg/training/top_job.go @@ -82,9 +82,7 @@ func topTrainingJobs(args []string, namespace string, allNamespaces bool, jobTyp if err != nil { return err } - for _, j := range allJobs { - jobs = append(jobs, j) - } + jobs = append(jobs, allJobs...) 
} jobs = makeTrainingJobOrderdByGPUCount(jobs) jobInfos := []types.TrainingJobInfo{} @@ -99,14 +97,14 @@ func topTrainingJobs(args []string, namespace string, allNamespaces bool, jobTyp if err != nil { return err } - fmt.Printf(string(outBytes)) + fmt.Print(string(outBytes)) return nil case types.YamlFormat: outBytes, err := yaml.Marshal(jobInfos) if err != nil { return err } - fmt.Printf(string(outBytes)) + fmt.Print(string(outBytes)) return nil } w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) diff --git a/pkg/training/trainer_mpi.go b/pkg/training/trainer_mpi.go index e015f287d..540874bc4 100644 --- a/pkg/training/trainer_mpi.go +++ b/pkg/training/trainer_mpi.go @@ -355,25 +355,6 @@ func (tt *MPIJobTrainer) isChiefPod(item *v1.Pod) bool { return true } -func (tt *MPIJobTrainer) isMPIJob(name, ns string, item v1alpha1.MPIJob) bool { - if val, ok := item.Labels["release"]; ok && (val == name) { - log.Debugf("the mpijob %s with labels %s", item.Name, val) - } else { - return false - } - - if val, ok := item.Labels["app"]; ok && (val == "mpijob") { - log.Debugf("the mpijob %s with labels %s is found.", item.Name, val) - } else { - return false - } - - if item.Namespace != ns { - return false - } - return true -} - func (tt *MPIJobTrainer) isMPIPod(name, ns string, pod *v1.Pod) bool { return utils.IsMPIPod(name, ns, pod) } diff --git a/pkg/training/trainer_pytorch.go b/pkg/training/trainer_pytorch.go index 16b93620f..6a9c5a765 100644 --- a/pkg/training/trainer_pytorch.go +++ b/pkg/training/trainer_pytorch.go @@ -84,17 +84,6 @@ func (pj *PyTorchJob) GetTrainJob() interface{} { return pj.pytorchjob } -func checkPyTorchStatus(status commonv1.JobStatus) commonv1.JobConditionType { - t := commonv1.JobConditionType("Pending") - for _, condition := range status.Conditions { - if condition.Status == v1.ConditionTrue { - t = condition.Type - break - } - } - return t -} - // Get the Status of the Job: RUNNING, PENDING, SUCCEEDED, FAILED func (pj *PyTorchJob) GetStatus() (status string) { status = "PENDING" @@ -345,20 +334,6 @@ func (tt *PyTorchJobTrainer) isChiefPod(pytorchjob *pytorchv1.PyTorchJob, item * return isChiefPod } -// check Labels: release==pytorchjob.name/app=="pytorchjob", namespace -func (tt *PyTorchJobTrainer) isPyTorchJob(name, ns string, item *pytorchv1.PyTorchJob) bool { - if item.Namespace != ns { - return false - } - if item.Labels["release"] != name { - return false - } - if item.Labels["app"] != string(tt.trainerType) { - return false - } - return true -} - // Determine whether it is a pod of pytorchjobs submitted by Arena // check pod label: release==pytorchjob.name/app=="pytorchjob"/group-name=='kubeflow.org', namespace func (tt *PyTorchJobTrainer) isPyTorchPod(name, ns string, pod *v1.Pod) bool { diff --git a/pkg/training/trainer_spark.go b/pkg/training/trainer_spark.go index 43a308d1f..ab28b15a9 100644 --- a/pkg/training/trainer_spark.go +++ b/pkg/training/trainer_spark.go @@ -83,7 +83,6 @@ func (sj *SparkJob) GetStatus() (status string) { if r := recover(); r != nil { fmt.Println("spark job may not complete,because of ", r) } - return }() status = "UNKNOWN" @@ -273,25 +272,6 @@ func (st *SparkJobTrainer) IsSupported(name, ns string) bool { return err == nil } -func (st *SparkJobTrainer) isSparkJob(name, ns string, job v1beta2.SparkApplication) bool { - if val, ok := job.Labels["release"]; ok && (val == name) { - log.Debugf("the sparkjob %s with labels %s", job.Name, val) - } else { - return false - } - - if val, ok := job.Labels["app"]; ok && (val == "sparkjob") { - 
log.Debugf("the sparkjob %s with labels %s is found.", job.Name, val) - } else { - return false - } - - if job.Namespace != ns { - return false - } - return true -} - func (st *SparkJobTrainer) GetTrainingJob(name, namespace string) (TrainingJob, error) { sparkJob, err := k8saccesser.GetK8sResourceAccesser().GetSparkJob(st.sparkjobClient, namespace, name) if err != nil { diff --git a/pkg/training/trainer_tensorflow.go b/pkg/training/trainer_tensorflow.go index 88137793b..5418b180e 100644 --- a/pkg/training/trainer_tensorflow.go +++ b/pkg/training/trainer_tensorflow.go @@ -361,26 +361,6 @@ func (tt *TensorFlowJobTrainer) isChiefPod(tfjob *tfv1.TFJob, item *v1.Pod) bool return isChiefPod } -func (tt *TensorFlowJobTrainer) isTensorFlowJob(name, ns string, item *tfv1.TFJob) bool { - - if val, ok := item.Labels["release"]; ok && (val == name) { - log.Debugf("the tfjob %s with labels %s", item.Name, val) - } else { - return false - } - - if val, ok := item.Labels["app"]; ok && (val == "tfjob") { - log.Debugf("the tfjob %s with labels %s is found.", item.Name, val) - } else { - return false - } - - if item.Namespace != ns { - return false - } - return true -} - func (tt *TensorFlowJobTrainer) isTensorFlowPod(name, ns string, item *v1.Pod) bool { return utils.IsTensorFlowPod(name, ns, item) } @@ -449,15 +429,6 @@ func makeJobStatusSortedByTime(conditions []commonv1.JobCondition) []commonv1.Jo return []commonv1.JobCondition(newConditions) } -func hasCondition(status commonv1.JobStatus, condType commonv1.JobConditionType) bool { - for _, condition := range status.Conditions { - if condition.Type == condType && condition.Status == v1.ConditionTrue { - return true - } - } - return false -} - func checkStatus(status commonv1.JobStatus) commonv1.JobConditionType { t := commonv1.JobConditionType("Pending") for _, condition := range status.Conditions { diff --git a/pkg/training/trainer_volcano.go b/pkg/training/trainer_volcano.go index f36d23a1c..8bc64f2f2 100644 --- a/pkg/training/trainer_volcano.go +++ b/pkg/training/trainer_volcano.go @@ -76,7 +76,6 @@ func (vj *VolcanoJob) GetStatus() (status string) { if r := recover(); r != nil { fmt.Println("volcano job may not complete,because of ", r) } - return }() status = "UNKNOWN" @@ -351,22 +350,6 @@ func (st *VolcanoJobTrainer) ListTrainingJobs(namespace string, allNamespace boo return trainingJobs, nil } -func (st *VolcanoJobTrainer) isVolcanoJob(name, ns string, job *v1alpha1.Job) bool { - if job.Labels["release"] != name { - return false - } - log.Debugf("the volcano job %s with labels release=%s", job.Name, name) - - if job.Labels["app"] != string(st.trainerType) { - return false - } - log.Debugf("the volcano job %s with labels app=%v is found.", job.Name, st.trainerType) - if job.Namespace != ns { - return false - } - return true -} - func (st *VolcanoJobTrainer) isVolcanoPod(name, ns string, pod *v1.Pod) bool { return utils.IsVolcanoPod(name, ns, pod) } diff --git a/pkg/util/charts.go b/pkg/util/charts.go index 0594a737d..d41a9bd6f 100644 --- a/pkg/util/charts.go +++ b/pkg/util/charts.go @@ -7,10 +7,7 @@ import ( func pathExists(path string) bool { _, err := os.Stat(path) if err != nil { - if os.IsExist(err) { - return true - } - return false + return os.IsExist(err) } return true } diff --git a/pkg/util/config/loader.go b/pkg/util/config/loader.go index d7bd867b5..99ef5e544 100644 --- a/pkg/util/config/loader.go +++ b/pkg/util/config/loader.go @@ -8,8 +8,6 @@ import ( log "github.com/sirupsen/logrus" ) -const doubleQuoteSpecialChars = "\\\n\r\"!$`" - 
// ReadEnvFile returns configs map func ReadConfigFile(filename string) (configs map[string]string) { configs = make(map[string]string) diff --git a/pkg/util/duration.go b/pkg/util/duration.go index de15af2c1..51d7b3cdf 100644 --- a/pkg/util/duration.go +++ b/pkg/util/duration.go @@ -29,9 +29,9 @@ func ShortHumanDuration(d time.Duration) string { // Allow deviation no more than 2 seconds(excluded) to tolerate machine time // inconsistence, it can be considered as almost now. if seconds := int(d.Seconds()); seconds < -1 { - return fmt.Sprintf("") + return "" } else if seconds < 0 { - return fmt.Sprintf("0s") + return "0s" } else if seconds < 60 { return fmt.Sprintf("%ds", seconds) } else if minutes := int(d.Minutes()); minutes < 60 { diff --git a/pkg/util/kubectl/kubectl.go b/pkg/util/kubectl/kubectl.go index 30606f79c..6acb029ca 100644 --- a/pkg/util/kubectl/kubectl.go +++ b/pkg/util/kubectl/kubectl.go @@ -26,16 +26,15 @@ import ( kservev1beta1 "github.com/kserve/kserve/pkg/apis/serving/v1beta1" kserveClient "github.com/kserve/kserve/pkg/client/clientset/versioned" - "github.com/kubeflow/arena/pkg/apis/config" log "github.com/sirupsen/logrus" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/kubernetes" + + "github.com/kubeflow/arena/pkg/apis/config" ) var kubectlCmd = []string{"arena-kubectl"} -var kubeClient *kubernetes.Clientset /** * dry-run creating kubernetes App Info for delete in future diff --git a/pkg/util/port_allocate_test.go b/pkg/util/port_allocate_test.go index f6b9d1a43..1ec0491de 100644 --- a/pkg/util/port_allocate_test.go +++ b/pkg/util/port_allocate_test.go @@ -34,6 +34,9 @@ func TestSelectAvailablePort(t *testing.T) { t.Errorf("Port should be 30002, when 30000,30001 is used") } port4, err := SelectAvailablePortWithDefault(clientset, 0) + if err != nil { + t.Errorf("failed to SelectAvailablePortWithDefault, %++v", err) + } t.Logf("port is %d", port4) if port4 == port3 { t.Errorf("If default port is used, chose another one") diff --git a/pkg/util/resource.go b/pkg/util/resource.go index 2274784f6..3750a2b5f 100644 --- a/pkg/util/resource.go +++ b/pkg/util/resource.go @@ -24,9 +24,7 @@ func AcquireAllPods(namespace string, client *kubernetes.Clientset) ([]v1.Pod, e if err != nil { return pods, err } - for _, pod := range podList.Items { - pods = append(pods, pod) - } + pods = append(pods, podList.Items...) allPods[namespace] = pods log.Debugf("Pods in %s: %++v", namespace, allPods[namespace]) return pods, nil diff --git a/pkg/util/retry.go b/pkg/util/retry.go index ee79f8b80..ce940dd7b 100644 --- a/pkg/util/retry.go +++ b/pkg/util/retry.go @@ -64,7 +64,7 @@ func RetryDuring(duration time.Duration, sleep time.Duration, callback func() er log.Infof("Still need to wait for func, err:%s\n", err.Error()) } - delta := time.Now().Sub(start) + delta := time.Since(start) if delta > duration { return fmt.Errorf("After %d attempts (during %s), last error: %s", i, delta, err) } diff --git a/pkg/util/validate.go b/pkg/util/validate.go index f5eb99f7c..8aad13f2a 100644 --- a/pkg/util/validate.go +++ b/pkg/util/validate.go @@ -24,21 +24,13 @@ import ( "k8s.io/client-go/kubernetes" ) -const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." 
+ dns1123LabelFmt + ")*" -const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" -const DNS1123SubdomainMaxLength int = 253 - const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" -const DNS1123LabelMaxLength int = 63 // Job Max lenth should be 49 const JobMaxLength int = 49 var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") -var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") - // ValidateJobName validates the job name, its length should less than 63, and match dns1123LabelFmt func ValidateJobName(value string) error { if len(value) > JobMaxLength { diff --git a/samples/sdk/custom-serving/main.go b/samples/sdk/custom-serving/main.go index c2c0a1011..73d6519dd 100644 --- a/samples/sdk/custom-serving/main.go +++ b/samples/sdk/custom-serving/main.go @@ -20,6 +20,10 @@ func main() { LogLevel: "info", Namespace: "default", }) + if err != nil { + fmt.Printf("failed to create arena client, err: %v\n", err) + return + } // create tfjob /* command: arena serve custom \ diff --git a/samples/sdk/etjob/etjob.go b/samples/sdk/etjob/etjob.go index fc21af16d..b9b9e8773 100644 --- a/samples/sdk/etjob/etjob.go +++ b/samples/sdk/etjob/etjob.go @@ -19,6 +19,10 @@ func main() { LogLevel: "debug", Namespace: "default", }) + if err != nil { + fmt.Printf("failed to create arena client, err: %v\n", err) + return + } // create tfjob /* command: arena submit etjob \ diff --git a/samples/sdk/mpijob/mpijob.go b/samples/sdk/mpijob/mpijob.go index ec0782a90..911d8a3ca 100644 --- a/samples/sdk/mpijob/mpijob.go +++ b/samples/sdk/mpijob/mpijob.go @@ -19,6 +19,10 @@ func main() { LogLevel: "info", Namespace: "default", }) + if err != nil { + fmt.Printf("failed to create arena client, err: %v\n", err) + return + } // create tfjob /* command: arena submit mpijob --name=mpi-standalone-test \