diff --git a/.ci/README.md b/.ci/README.md index 2c0c8b56d144..e156ca5be35e 100644 --- a/.ci/README.md +++ b/.ci/README.md @@ -82,6 +82,13 @@ done In the event of a failure, this will stop running. If it succeeds, update the sync tag with `git push origin HEAD:tpg-sync`. +### Downstream build job is not triggered by commits. +This is rare but we've seen this happen before. In this case, we need to manually trigger a Cloud Build job by running +``` +gcloud builds triggers run build-downstreams --project=graphite-docker-images --substitutions=BRANCH_NAME=<branch> --sha=<sha> +``` +You'll need to substitute `<sha>` with the commit sha that you'd like to trigger the build against and `<branch>` with the base branch that this commit is pushed into, likely `main` but could be feature branches in some cases. + ## Deploying the pipeline The code on the PR's branch is used to plan actions - no merge is performed. If you are making changes to the workflows, your changes will not trigger a workflow run, because of the risk of an untrusted contributor introducing malicious code in this way. You will need to test locally by using the [cloud build local builder](https://cloud.google.com/cloud-build/docs/build-debug-locally). @@ -89,7 +96,6 @@ If you are making changes to the containers, your changes will not apply until t Pausing the pipeline is done in the cloud console, by setting the downstream-builder trigger to disabled. You can find that trigger [here](https://console.cloud.google.com/cloud-build/triggers/edit/f80a7496-b2f4-4980-a706-c5425a52045b?project=graphite-docker-images) - ## Dependency change handbook: If someone (often a bot) creates a PR which updates Gemfile or Gemfile.lock, they will not be able to generate diffs. This is because bundler doesn't allow you to run a binary unless your installed gems exactly match the Gemfile.lock, and since we have to run generation before and after the change, there is no possible container that will satisfy all requirements. 
diff --git a/.ci/containers/go-plus/Dockerfile b/.ci/containers/go-plus/Dockerfile index 6a6d48d9038f..c7b8641eb297 100644 --- a/.ci/containers/go-plus/Dockerfile +++ b/.ci/containers/go-plus/Dockerfile @@ -1,5 +1,25 @@ -from golang:1.19-bullseye as resource +# Stage 1: Download go module cache for builds +FROM golang:1.19-bullseye AS builder +ENV GOCACHE=/go/cache + +RUN apt-get update && apt-get install -y unzip +WORKDIR /app1 +# Add the source code and build +ADD "https://github.com/GoogleCloudPlatform/magic-modules/archive/refs/heads/main.zip" source.zip +RUN unzip source.zip && rm source.zip +WORKDIR /app1/magic-modules-main/.ci/magician +# Build the binary (we won't use it in the final image, but it's cached) +RUN go build -o /dev/null . + +# Stage 2: Creating the final image +FROM golang:1.19-bullseye SHELL ["/bin/bash", "-c"] +ENV GOCACHE=/go/cache + +# Copy Go dependencies and Go build cache +COPY --from=builder /go/pkg/mod /go/pkg/mod +COPY --from=builder /go/cache /go/cache + # Set up Github SSH cloning. RUN ssh-keyscan github.com >> /known_hosts RUN echo "UserKnownHostsFile /known_hosts" >> /etc/ssh/ssh_config diff --git a/.ci/containers/membership-checker/Dockerfile b/.ci/containers/membership-checker/Dockerfile deleted file mode 100644 index 81f0e6b35dab..000000000000 --- a/.ci/containers/membership-checker/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM golang:1.20 - -WORKDIR /src -COPY . 
./ - -RUN go build - -ENTRYPOINT [ "/src/membership-checker" ] \ No newline at end of file diff --git a/.ci/containers/membership-checker/community.go b/.ci/containers/membership-checker/community.go deleted file mode 100644 index 18062d370018..000000000000 --- a/.ci/containers/membership-checker/community.go +++ /dev/null @@ -1,152 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - - "google.golang.org/api/cloudbuild/v1" -) - -func approveCommunityChecker(prNumber, projectId, commitSha string) error { - buildId, err := getPendingBuildId(projectId, commitSha) - if err != nil { - return err - } - - if buildId == "" { - return fmt.Errorf("Failed to find pending build for PR %s", prNumber) - } - - err = approveBuild(projectId, buildId) - if err != nil { - return err - } - - return nil -} - -func postAwaitingApprovalBuildLink(prNumber, GITHUB_TOKEN, projectId, commitSha string) error { - buildId, err := getPendingBuildId(projectId, commitSha) - if err != nil { - return err - } - - if buildId == "" { - return fmt.Errorf("Failed to find pending build for PR %s", prNumber) - } - - targetUrl := fmt.Sprintf("https://console.cloud.google.com/cloud-build/builds;region=global/%s?project=%s", buildId, projectId) - - postBody := map[string]string{ - "context": "Approve Build", - "state": "success", - "target_url": targetUrl, - } - - err = postBuildStatus(prNumber, GITHUB_TOKEN, commitSha, postBody) - if err != nil { - return err - } - - return nil -} - -func postBuildStatus(prNumber, GITHUB_TOKEN, commitSha string, body map[string]string) error { - - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/%s", commitSha) - - _, err := requestCall(url, "POST", GITHUB_TOKEN, nil, body) - if err != nil { - return err - } - - fmt.Printf("Successfully posted community-checker build link to pull request %s\n", prNumber) - - return nil -} - -func getPendingBuildId(projectId, commitSha string) (string, error) { - COMMUNITY_CHECKER_TRIGGER, 
ok := os.LookupEnv("COMMUNITY_CHECKER_TRIGGER") - if !ok { - return "", fmt.Errorf("Did not provide COMMUNITY_CHECKER_TRIGGER environment variable") - } - - ctx := context.Background() - - c, err := cloudbuild.NewService(ctx) - if err != nil { - return "", err - } - - filter := fmt.Sprintf("trigger_id=%s AND status=PENDING", COMMUNITY_CHECKER_TRIGGER) - // Builds will be sorted by createTime, descending order. - // 50 should be enough to include the one needs auto approval - pageSize := int64(50) - - builds, err := c.Projects.Builds.List(projectId).Filter(filter).PageSize(pageSize).Do() - if err != nil { - return "", err - } - - for _, build := range builds.Builds { - if build.Substitutions["COMMIT_SHA"] == commitSha { - return build.Id, nil - } - } - - return "", nil -} - -func approveBuild(projectId, buildId string) error { - ctx := context.Background() - - c, err := cloudbuild.NewService(ctx) - if err != nil { - return err - } - - name := fmt.Sprintf("projects/%s/builds/%s", projectId, buildId) - - approveBuildRequest := &cloudbuild.ApproveBuildRequest{ - ApprovalResult: &cloudbuild.ApprovalResult{ - Decision: "APPROVED", - }, - } - - _, err = c.Projects.Builds.Approve(name, approveBuildRequest).Do() - if err != nil { - return err - } - - fmt.Println("Auto approved build ", buildId) - - return nil -} - -func addAwaitingApprovalLabel(prNumber, GITHUB_TOKEN string) error { - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/labels", prNumber) - - body := map[string][]string{ - "labels": []string{"awaiting-approval"}, - } - _, err := requestCall(url, "POST", GITHUB_TOKEN, nil, body) - - if err != nil { - return fmt.Errorf("Failed to add awaiting approval label: %s", err) - } - - return nil - -} - -func removeAwaitingApprovalLabel(prNumber, GITHUB_TOKEN string) error { - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/labels/awaiting-approval", prNumber) - _, err := 
requestCall(url, "DELETE", GITHUB_TOKEN, nil, nil) - - if err != nil { - return fmt.Errorf("Failed to remove awaiting approval label: %s", err) - } - - return nil -} diff --git a/.ci/containers/membership-checker/main.go b/.ci/containers/membership-checker/main.go deleted file mode 100644 index d9a1d2bb0dbd..000000000000 --- a/.ci/containers/membership-checker/main.go +++ /dev/null @@ -1,131 +0,0 @@ -package main - -import ( - "fmt" - "os" -) - -func main() { - GITHUB_TOKEN, ok := os.LookupEnv("GITHUB_TOKEN") - if !ok { - fmt.Println("Did not provide GITHUB_TOKEN environment variable") - os.Exit(1) - } - if len(os.Args) <= 7 { - fmt.Println("Not enough arguments") - os.Exit(1) - } - - projectId := "graphite-docker-images" - repoName := "magic-modules" - - target := os.Args[1] - fmt.Println("Trigger Target: ", target) - - prNumber := os.Args[2] - fmt.Println("PR Number: ", prNumber) - - commitSha := os.Args[3] - fmt.Println("Commit SHA: ", commitSha) - - branchName := os.Args[4] - fmt.Println("Branch Name: ", branchName) - - headRepoUrl := os.Args[5] - fmt.Println("Head Repo URL: ", headRepoUrl) - - headBranch := os.Args[6] - fmt.Println("Head Branch: ", headBranch) - - baseBranch := os.Args[7] - fmt.Println("Base Branch: ", baseBranch) - - substitutions := map[string]string{ - "BRANCH_NAME": branchName, - "_PR_NUMBER": prNumber, - "_HEAD_REPO_URL": headRepoUrl, - "_HEAD_BRANCH": headBranch, - "_BASE_BRANCH": baseBranch, - } - - author, err := getPullRequestAuthor(prNumber, GITHUB_TOKEN) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - authorUserType := getUserType(author, GITHUB_TOKEN) - trusted := authorUserType == coreContributorUserType || authorUserType == googlerUserType - - if target == "auto_run" && authorUserType != coreContributorUserType { - fmt.Println("Not core contributor - assigning reviewer") - - firstRequestedReviewer, err := getPullRequestRequestedReviewer(prNumber, GITHUB_TOKEN) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - 
previouslyInvolvedReviewers, err := getPullRequestPreviousAssignedReviewers(prNumber, GITHUB_TOKEN) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - reviewersToRequest, newPrimaryReviewer := chooseReviewers(firstRequestedReviewer, previouslyInvolvedReviewers) - - for _, reviewer := range reviewersToRequest { - err = requestPullRequestReviewer(prNumber, reviewer, GITHUB_TOKEN) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - } - - if newPrimaryReviewer != "" { - comment := formatReviewerComment(newPrimaryReviewer, authorUserType, trusted) - err = postComment(prNumber, comment, GITHUB_TOKEN, authorUserType) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - } - } - - // auto_run(contributor-membership-checker) will be run on every commit or /gcbrun: - // only triggers builds for trusted users - - // needs_approval(community-checker) will be run after approval: - // 1. will be auto approved (by contributor-membership-checker) for trusted users - // 2. needs approval from team reviewer via cloud build for untrusted users - // 3. only triggers build for untrusted users (because trusted users will be handled by auto_run) - if (target == "auto_run" && trusted) || (target == "needs_approval" && !trusted) { - err = triggerMMPresubmitRuns(projectId, repoName, commitSha, substitutions) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - } - - // in contributor-membership-checker job: - // 1. auto approve community-checker run for trusted users - // 2. 
add awaiting-approval label to external contributor PRs - if target == "auto_run" { - if trusted { - err = approveCommunityChecker(prNumber, projectId, commitSha) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - } else { - addAwaitingApprovalLabel(prNumber, GITHUB_TOKEN) - postAwaitingApprovalBuildLink(prNumber, GITHUB_TOKEN, projectId, commitSha) - } - } - - // in community-checker job: - // remove awaiting-approval label from external contributor PRs - if target == "needs_approval" { - removeAwaitingApprovalLabel(prNumber, GITHUB_TOKEN) - } -} diff --git a/.ci/containers/membership-checker/reviewer_assignment.go b/.ci/containers/membership-checker/reviewer_assignment.go deleted file mode 100644 index 4dca8ff47e22..000000000000 --- a/.ci/containers/membership-checker/reviewer_assignment.go +++ /dev/null @@ -1,163 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - "strings" - "text/template" - - _ "embed" -) - -var ( - //go:embed REVIEWER_ASSIGNMENT_COMMENT.md - reviewerAssignmentComment string -) - -// Returns a list of users to request review from, as well as a new primary reviewer if this is the first run. 
-func chooseReviewers(firstRequestedReviewer string, previouslyInvolvedReviewers []string) (reviewersToRequest []string, newPrimaryReviewer string) { - hasPrimaryReviewer := false - newPrimaryReviewer = "" - - if firstRequestedReviewer != "" { - hasPrimaryReviewer = true - } - - if previouslyInvolvedReviewers != nil { - for _, reviewer := range previouslyInvolvedReviewers { - if isTeamReviewer(reviewer) { - hasPrimaryReviewer = true - reviewersToRequest = append(reviewersToRequest, reviewer) - } - } - } - - if !hasPrimaryReviewer { - newPrimaryReviewer = getRandomReviewer() - reviewersToRequest = append(reviewersToRequest, newPrimaryReviewer) - } - - return reviewersToRequest, newPrimaryReviewer -} - -func getPullRequestAuthor(prNumber, GITHUB_TOKEN string) (string, error) { - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s", prNumber) - - var pullRequest struct { - User struct { - Login string `json:"login"` - } `json:"user"` - } - - _, err := requestCall(url, "GET", GITHUB_TOKEN, &pullRequest, nil) - if err != nil { - return "", err - } - - return pullRequest.User.Login, nil -} - -func getPullRequestRequestedReviewer(prNumber, GITHUB_TOKEN string) (string, error) { - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/requested_reviewers", prNumber) - - var requestedReviewers struct { - Users []struct { - Login string `json:"login"` - } `json:"users"` - } - - _, err := requestCall(url, "GET", GITHUB_TOKEN, &requestedReviewers, nil) - if err != nil { - return "", err - } - - if requestedReviewers.Users == nil || len(requestedReviewers.Users) == 0 { - return "", nil - } - - return requestedReviewers.Users[0].Login, nil -} - -func getPullRequestPreviousAssignedReviewers(prNumber, GITHUB_TOKEN string) ([]string, error) { - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/reviews", prNumber) - - var reviews []struct { - User struct { - Login 
string `json:"login"` - } `json:"user"` - } - - _, err := requestCall(url, "GET", GITHUB_TOKEN, &reviews, nil) - if err != nil { - return nil, err - } - - previousAssignedReviewers := map[string]struct{}{} - for _, review := range reviews { - previousAssignedReviewers[review.User.Login] = struct{}{} - } - - result := []string{} - for key, _ := range previousAssignedReviewers { - result = append(result, key) - } - - return result, nil -} - -func requestPullRequestReviewer(prNumber, assignee, GITHUB_TOKEN string) error { - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/requested_reviewers", prNumber) - - body := map[string][]string{ - "reviewers": []string{assignee}, - "team_reviewers": []string{}, - } - - reqStatusCode, err := requestCall(url, "POST", GITHUB_TOKEN, nil, body) - if err != nil { - return err - } - - if reqStatusCode != http.StatusCreated { - return fmt.Errorf("Error adding reviewer for PR %s", prNumber) - } - - fmt.Printf("Successfully added reviewer %s to pull request %s\n", assignee, prNumber) - - return nil -} - -func formatReviewerComment(newPrimaryReviewer string, authorUserType userType, trusted bool) string { - tmpl, err := template.New("REVIEWER_ASSIGNMENT_COMMENT.md").Parse(reviewerAssignmentComment) - if err != nil { - panic(fmt.Sprintf("Unable to parse REVIEWER_ASSIGNMENT_COMMENT.md: %s", err)) - } - sb := new(strings.Builder) - tmpl.Execute(sb, map[string]interface{}{ - "reviewer": newPrimaryReviewer, - "authorUserType": authorUserType.String(), - "trusted": trusted, - }) - return sb.String() -} - -func postComment(prNumber, comment, GITHUB_TOKEN string, authorUserType userType) error { - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/comments", prNumber) - - body := map[string]string{ - "body": comment, - } - - reqStatusCode, err := requestCall(url, "POST", GITHUB_TOKEN, nil, body) - if err != nil { - return err - } - - if reqStatusCode != 
http.StatusCreated { - return fmt.Errorf("Error posting comment for PR %s", prNumber) - } - - fmt.Printf("Successfully posted comment to pull request %s\n", prNumber) - - return nil -} diff --git a/.ci/containers/membership-checker/utils.go b/.ci/containers/membership-checker/utils.go deleted file mode 100644 index b08a7572691e..000000000000 --- a/.ci/containers/membership-checker/utils.go +++ /dev/null @@ -1,48 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - - "golang.org/x/exp/slices" -) - -func requestCall(url, method, credentials string, result interface{}, body interface{}) (int, error) { - client := &http.Client{} - jsonBody, err := json.Marshal(body) - if err != nil { - return 1, fmt.Errorf("Error marshaling JSON: %s", err) - } - req, err := http.NewRequest(method, url, bytes.NewBuffer(jsonBody)) - if err != nil { - return 2, fmt.Errorf("Error creating request: %s", err) - } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", credentials)) - req.Header.Set("Content-Type", "application/json") - resp, err := client.Do(req) - if err != nil { - return 3, err - } - defer resp.Body.Close() - - if result != nil { - if err = json.NewDecoder(resp.Body).Decode(&result); err != nil { - return 4, err - } - } - - return resp.StatusCode, nil -} - -func removes(s1 []string, s2 []string) []string { - result := make([]string, 0, len(s1)) - - for _, v := range s1 { - if !slices.Contains(s2, v) { - result = append(result, v) - } - } - return result -} diff --git a/.ci/gcb-changelog-checker.yml b/.ci/gcb-changelog-checker.yml deleted file mode 100644 index aa2c5289675f..000000000000 --- a/.ci/gcb-changelog-checker.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -steps: - - name: 'gcr.io/graphite-docker-images/bash-plus' - entrypoint: 'bash' - args: - - -c - - echo "This build step has been moved to a github action." - - -# noop - moved to pull_request event on https://github.com/GoogleCloudPlatform/magic-modules/pull/8727. 
diff --git a/.ci/gcb-community-checker.yml b/.ci/gcb-community-checker.yml index eaf3d6456b36..9aba1d2d9478 100644 --- a/.ci/gcb-community-checker.yml +++ b/.ci/gcb-community-checker.yml @@ -1,17 +1,66 @@ --- steps: - - name: 'gcr.io/graphite-docker-images/membership-checker' - id: community-checker - secretEnv: ["GITHUB_TOKEN", "GENERATE_DIFFS_TRIGGER", "RAKE_TESTS_TRIGGER"] - timeout: 8000s - args: - - "needs_approval" - - $_PR_NUMBER - - $COMMIT_SHA - - $BRANCH_NAME - - $_HEAD_REPO_URL - - $_HEAD_BRANCH - - $_BASE_BRANCH + # The GCB / GH integration uses a shallow clone of the repo. We need to convert + # that to a full clone in order to work with it properly. + # https://cloud.google.com/source-repositories/docs/integrating-with-cloud-build#unshallowing_clones + - name: "gcr.io/cloud-builders/git" + args: + - fetch + - --unshallow + + # Configure git + - name: "gcr.io/cloud-builders/git" + args: + - config + - --global + - user.email + - magic-modules+differ@google.com + - name: "gcr.io/cloud-builders/git" + args: + - config + - --global + - user.name + - "Modular Magician Diff Process" + + # Display commit log for clarity + - name: "gcr.io/cloud-builders/git" + args: + - log + - "--oneline" + - "-n 10" + + # Find common ancestor commit and apply diff for the .ci folder. 
+ - name: "gcr.io/cloud-builders/git" + id: findMergeBase + entrypoint: "bash" + args: + - "-c" + - | + git fetch origin main + if [ "$_BASE_BRANCH" != "main" ]; then + echo "Checking out .ci/ folder from main" + git checkout origin/main -- .ci/ + else + base_commit=$(git merge-base origin/main HEAD) + echo "Common ancestor commit: $base_commit" + git diff $base_commit origin/main -- .ci/ + git diff $base_commit origin/main -- .ci/ > /workspace/ci.diff + git apply ./ci.diff --allow-empty + fi + + - name: 'gcr.io/graphite-docker-images/go-plus' + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' + id: community-checker + secretEnv: ["GITHUB_TOKEN", "GENERATE_DIFFS_TRIGGER"] + timeout: 8000s + args: + - "community-checker" + - $_PR_NUMBER + - $COMMIT_SHA + - $BRANCH_NAME + - $_HEAD_REPO_URL + - $_HEAD_BRANCH + - $_BASE_BRANCH availableSecrets: secretManager: @@ -19,5 +68,3 @@ availableSecrets: env: GITHUB_TOKEN - versionName: projects/673497134629/secrets/ci-trigger-generate-diffs/versions/latest env: GENERATE_DIFFS_TRIGGER - - versionName: projects/673497134629/secrets/ci-trigger-rake-test/versions/latest - env: RAKE_TESTS_TRIGGER diff --git a/.ci/gcb-contributor-membership-checker.yml b/.ci/gcb-contributor-membership-checker.yml index a8ce69bb3371..713918f91e35 100644 --- a/.ci/gcb-contributor-membership-checker.yml +++ b/.ci/gcb-contributor-membership-checker.yml @@ -1,17 +1,67 @@ --- steps: - - name: 'gcr.io/graphite-docker-images/membership-checker' - id: contributor-membership-checker - secretEnv: ["GITHUB_TOKEN", "GENERATE_DIFFS_TRIGGER", "RAKE_TESTS_TRIGGER", "COMMUNITY_CHECKER_TRIGGER"] - timeout: 8000s - args: - - "auto_run" - - $_PR_NUMBER - - $COMMIT_SHA - - $BRANCH_NAME - - $_HEAD_REPO_URL - - $_HEAD_BRANCH - - $_BASE_BRANCH + # The GCB / GH integration uses a shallow clone of the repo. We need to convert + # that to a full clone in order to work with it properly. 
+ # https://cloud.google.com/source-repositories/docs/integrating-with-cloud-build#unshallowing_clones + - name: "gcr.io/cloud-builders/git" + args: + - fetch + - --unshallow + + # Configure git + - name: "gcr.io/cloud-builders/git" + args: + - config + - --global + - user.email + - magic-modules+differ@google.com + - name: "gcr.io/cloud-builders/git" + args: + - config + - --global + - user.name + - "Modular Magician Diff Process" + + # Display commit log for clarity + - name: "gcr.io/cloud-builders/git" + args: + - log + - "--oneline" + - "-n 10" + + # Find common ancestor commit and apply diff for the .ci folder. + - name: "gcr.io/cloud-builders/git" + id: findMergeBase + entrypoint: "bash" + args: + - "-c" + - | + git fetch origin main + if [ "$_BASE_BRANCH" != "main" ]; then + echo "Checking out .ci/ folder from main" + git checkout origin/main -- .ci/ + else + base_commit=$(git merge-base origin/main HEAD) + echo "Common ancestor commit: $base_commit" + git diff $base_commit origin/main -- .ci/ + git diff $base_commit origin/main -- .ci/ > /workspace/ci.diff + git apply ./ci.diff --allow-empty + fi + + - name: "gcr.io/graphite-docker-images/go-plus" + entrypoint: "/workspace/.ci/scripts/go-plus/magician/exec.sh" + id: contributor-membership-checker + secretEnv: + ["GITHUB_TOKEN", "GENERATE_DIFFS_TRIGGER", "COMMUNITY_CHECKER_TRIGGER"] + timeout: 8000s + args: + - "membership-checker" + - $_PR_NUMBER + - $COMMIT_SHA + - $BRANCH_NAME + - $_HEAD_REPO_URL + - $_HEAD_BRANCH + - $_BASE_BRANCH availableSecrets: secretManager: @@ -19,7 +69,5 @@ availableSecrets: env: GITHUB_TOKEN - versionName: projects/673497134629/secrets/ci-trigger-generate-diffs/versions/latest env: GENERATE_DIFFS_TRIGGER - - versionName: projects/673497134629/secrets/ci-trigger-rake-test/versions/latest - env: RAKE_TESTS_TRIGGER - versionName: projects/673497134629/secrets/ci-trigger-community-checker/versions/latest env: COMMUNITY_CHECKER_TRIGGER diff --git a/.ci/gcb-generate-diffs-new.yml 
b/.ci/gcb-generate-diffs-new.yml index 1a69cfd12b6e..5beac1aa4a95 100644 --- a/.ci/gcb-generate-diffs-new.yml +++ b/.ci/gcb-generate-diffs-new.yml @@ -182,23 +182,16 @@ steps: - $_PR_NUMBER - name: 'gcr.io/graphite-docker-images/go-plus' - entrypoint: '/workspace/.ci/scripts/go-plus/tgc-tester/test_tgc.sh' id: tgc-test allowFailure: true + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' secretEnv: ["GITHUB_TOKEN"] waitFor: ["tpgb-head", "tpgb-base", "tgc-head", "tgc-base"] - env: - - TEST_PROJECT=$_VALIDATOR_TEST_PROJECT - - TEST_FOLDER_ID=$_VALIDATOR_TEST_FOLDER - - TEST_ANCESTRY=$_VALIDATOR_TEST_ANCESTRY - - TEST_ORG_ID=$_VALIDATOR_TEST_ORG args: - - $_PR_NUMBER - - $COMMIT_SHA - - $BUILD_ID - - $PROJECT_ID - - "17" # Build step - - terraform-google-conversion + - 'test-tgc' + env: + - COMMIT_SHA=$COMMIT_SHA + - PR_NUMBER=$_PR_NUMBER - name: 'gcr.io/graphite-docker-images/go-plus' id: tgc-test-integration-0.12.31 @@ -240,29 +233,27 @@ steps: - "19" # Build step - terraform-google-conversion - - name: 'gcr.io/graphite-docker-images/bash-plus' + - name: 'gcr.io/graphite-docker-images/go-plus' id: tpgb-test - entrypoint: '/workspace/.ci/scripts/bash-plus/terraform-tester/test_terraform.sh' + allowFailure: true + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' secretEnv: ["GITHUB_TOKEN"] waitFor: ["tpgb-head", "tpgb-base"] + args: + - 'test-tpg' env: - VERSION=beta - COMMIT_SHA=$COMMIT_SHA - PR_NUMBER=$_PR_NUMBER - - name: 'gcr.io/graphite-docker-images/bash-plus' + - name: 'gcr.io/graphite-docker-images/go-plus' id: tpg-test - entrypoint: '/workspace/.ci/scripts/bash-plus/terraform-tester/test_terraform.sh' + allowFailure: true + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' secretEnv: ["GITHUB_TOKEN"] waitFor: ["tpg-head", "tpg-base"] args: - - 'ga' # remove after 07/2023 - - $_PR_NUMBER # remove after 07/2023 - - $COMMIT_SHA # remove after 07/2023 - - $BUILD_ID # remove after 07/2023 - - $PROJECT_ID # remove after 
07/2023 - - GoogleCloudPlatform/magic-modules # remove after 07/2023 - - "21" # remove after 07/2023 + - 'test-tpg' env: - VERSION=ga - COMMIT_SHA=$COMMIT_SHA diff --git a/.ci/gcb-run-rake-tests.yml b/.ci/gcb-run-rake-tests.yml deleted file mode 100644 index 9d9072cbaf74..000000000000 --- a/.ci/gcb-run-rake-tests.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -steps: -- name: 'gcr.io/graphite-docker-images/build-environment' - id: run-rake-tests - entrypoint: bundle - dir: mmv1 - args: - - exec - - rake - - test diff --git a/.ci/infra/terraform/README.md b/.ci/infra/terraform/README.md index 94356c2f0bb6..6d09096610fd 100644 --- a/.ci/infra/terraform/README.md +++ b/.ci/infra/terraform/README.md @@ -66,3 +66,4 @@ Quotas that will need to be adjusted to support all tests: - compute.googleapis.com/regional_in_use_addresses (us-central1) - compute.googleapis.com/regional_static_addresses (us-central1) - compute.googleapis.com/routers +- compute.googleapis.com/c2_cpus (us-central1) diff --git a/.ci/infra/terraform/main.tf b/.ci/infra/terraform/main.tf index fa6d3a584595..a6bf36234b21 100644 --- a/.ci/infra/terraform/main.tf +++ b/.ci/infra/terraform/main.tf @@ -201,6 +201,7 @@ module "project-services" { "cloudtrace.googleapis.com", "composer.googleapis.com", "compute.googleapis.com", + "connectors.googleapis.com", "container.googleapis.com", "containeranalysis.googleapis.com", "containerfilesystem.googleapis.com", @@ -213,6 +214,7 @@ module "project-services" { "dataform.googleapis.com", "datafusion.googleapis.com", "datamigration.googleapis.com", + "datapipelines.googleapis.com", "dataplex.googleapis.com", "dataproc.googleapis.com", "datastore.googleapis.com", @@ -241,7 +243,6 @@ module "project-services" { "firebasestorage.googleapis.com", "firestore.googleapis.com", "firestorekeyvisualizer.googleapis.com", - "gameservices.googleapis.com", "gkebackup.googleapis.com", "gkeconnect.googleapis.com", "gkehub.googleapis.com", @@ -300,6 +301,7 @@ module "project-services" { 
"storage-api.googleapis.com", "storage-component.googleapis.com", "storage.googleapis.com", + "storageinsights.googleapis.com", "storagetransfer.googleapis.com", "test-file.sandbox.googleapis.com", "testing.googleapis.com", diff --git a/.ci/containers/membership-checker/build_trigger.go b/.ci/magician/cloudbuild/build_trigger.go similarity index 64% rename from .ci/containers/membership-checker/build_trigger.go rename to .ci/magician/cloudbuild/build_trigger.go index 2a6b90bc0cf8..04ecaf80c748 100644 --- a/.ci/containers/membership-checker/build_trigger.go +++ b/.ci/magician/cloudbuild/build_trigger.go @@ -1,4 +1,4 @@ -package main +package cloudbuild import ( "context" @@ -8,13 +8,13 @@ import ( "google.golang.org/api/cloudbuild/v1" ) -func triggerMMPresubmitRuns(projectId, repoName, commitSha string, substitutions map[string]string) error { +func (cb cloudBuild) TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error { presubmitTriggerId, ok := os.LookupEnv("GENERATE_DIFFS_TRIGGER") if !ok { - return fmt.Errorf("Did not provide GENERATE_DIFFS_TRIGGER environment variable") + return fmt.Errorf("did not provide GENERATE_DIFFS_TRIGGER environment variable") } - err := triggerCloudBuildRun(projectId, presubmitTriggerId, repoName, commitSha, substitutions) + err := triggerCloudBuildRun(PROJECT_ID, presubmitTriggerId, REPO_NAME, commitSha, substitutions) if err != nil { return err } @@ -26,7 +26,7 @@ func triggerCloudBuildRun(projectId, triggerId, repoName, commitSha string, subs ctx := context.Background() c, err := cloudbuild.NewService(ctx) if err != nil { - return fmt.Errorf("Failed to create Cloud Build service client: %s", err) + return fmt.Errorf("failed to create Cloud Build service client: %s", err) } repoSource := &cloudbuild.RepoSource{ @@ -38,7 +38,7 @@ func triggerCloudBuildRun(projectId, triggerId, repoName, commitSha string, subs _, err = c.Projects.Triggers.Run(projectId, triggerId, repoSource).Do() if err != nil { - return 
fmt.Errorf("Failed to create Cloud Build run: %s", err) + return fmt.Errorf("failed to create Cloud Build run: %s", err) } fmt.Println("Started Cloud Build Run: ", triggerId) diff --git a/.ci/magician/cloudbuild/community.go b/.ci/magician/cloudbuild/community.go new file mode 100644 index 000000000000..ff7634b82d7d --- /dev/null +++ b/.ci/magician/cloudbuild/community.go @@ -0,0 +1,100 @@ +package cloudbuild + +import ( + "context" + "fmt" + "os" + + "google.golang.org/api/cloudbuild/v1" +) + +func (cb cloudBuild) ApproveCommunityChecker(prNumber, commitSha string) error { + buildId, err := getPendingBuildId(PROJECT_ID, commitSha) + if err != nil { + return err + } + + if buildId == "" { + return fmt.Errorf("Failed to find pending build for PR %s", prNumber) + } + + err = approveBuild(PROJECT_ID, buildId) + if err != nil { + return err + } + + return nil +} + +func (cb cloudBuild) GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) { + buildId, err := getPendingBuildId(PROJECT_ID, commitSha) + if err != nil { + return "", err + } + + if buildId == "" { + return "", fmt.Errorf("failed to find pending build for PR %s", prNumber) + } + + targetUrl := fmt.Sprintf("https://console.cloud.google.com/cloud-build/builds;region=global/%s?project=%s", buildId, PROJECT_ID) + + return targetUrl, nil +} + +func getPendingBuildId(projectId, commitSha string) (string, error) { + COMMUNITY_CHECKER_TRIGGER, ok := os.LookupEnv("COMMUNITY_CHECKER_TRIGGER") + if !ok { + return "", fmt.Errorf("Did not provide COMMUNITY_CHECKER_TRIGGER environment variable") + } + + ctx := context.Background() + + c, err := cloudbuild.NewService(ctx) + if err != nil { + return "", err + } + + filter := fmt.Sprintf("trigger_id=%s AND status=PENDING", COMMUNITY_CHECKER_TRIGGER) + // Builds will be sorted by createTime, descending order. 
+ // 50 should be enough to include the one needs auto approval + pageSize := int64(50) + + builds, err := c.Projects.Builds.List(projectId).Filter(filter).PageSize(pageSize).Do() + if err != nil { + return "", err + } + + for _, build := range builds.Builds { + if build.Substitutions["COMMIT_SHA"] == commitSha { + return build.Id, nil + } + } + + return "", nil +} + +func approveBuild(projectId, buildId string) error { + ctx := context.Background() + + c, err := cloudbuild.NewService(ctx) + if err != nil { + return err + } + + name := fmt.Sprintf("projects/%s/builds/%s", projectId, buildId) + + approveBuildRequest := &cloudbuild.ApproveBuildRequest{ + ApprovalResult: &cloudbuild.ApprovalResult{ + Decision: "APPROVED", + }, + } + + _, err = c.Projects.Builds.Approve(name, approveBuildRequest).Do() + if err != nil { + return err + } + + fmt.Println("Auto approved build ", buildId) + + return nil +} diff --git a/.ci/magician/cloudbuild/constants.go b/.ci/magician/cloudbuild/constants.go new file mode 100644 index 000000000000..a64c5fce7c61 --- /dev/null +++ b/.ci/magician/cloudbuild/constants.go @@ -0,0 +1,4 @@ +package cloudbuild + +const PROJECT_ID = "graphite-docker-images" +const REPO_NAME = "magic-modules" diff --git a/.ci/magician/cloudbuild/init.go b/.ci/magician/cloudbuild/init.go new file mode 100644 index 000000000000..e2d29face5a2 --- /dev/null +++ b/.ci/magician/cloudbuild/init.go @@ -0,0 +1,14 @@ +package cloudbuild + +type cloudBuild bool + +type CloudBuild interface { + ApproveCommunityChecker(prNumber, commitSha string) error + GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) + TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error +} + +func NewCloudBuildService() CloudBuild { + var x cloudBuild = true + return x +} diff --git a/.ci/magician/cmd/community_checker.go b/.ci/magician/cmd/community_checker.go new file mode 100644 index 000000000000..f1b8a943d08e --- /dev/null +++ 
b/.ci/magician/cmd/community_checker.go @@ -0,0 +1,106 @@ +/* +Copyright © 2023 NAME HERE +*/ +package cmd + +import ( + "fmt" + "magician/cloudbuild" + "magician/github" + "os" + + "github.com/spf13/cobra" +) + +type ccGithub interface { + GetPullRequestAuthor(prNumber string) (string, error) + GetUserType(user string) github.UserType + RemoveLabel(prNumber string, label string) error + PostBuildStatus(prNumber string, title string, state string, targetUrl string, commitSha string) error +} + +type ccCloudbuild interface { + TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error +} + +// communityApprovalCmd represents the communityApproval command +var communityApprovalCmd = &cobra.Command{ + Use: "community-checker", + Short: "Run presubmit generate diffs for untrusted users and remove awaiting-approval label", + Long: `This command processes pull requests and performs various validations and actions based on the PR's metadata and author. + + The following PR details are expected as arguments: + 1. PR Number + 2. Commit SHA + 3. Branch Name + 4. Head Repo URL + 5. Head Branch + 6. Base Branch + + The command performs the following steps: + 1. Retrieve and print the provided pull request details. + 2. Get the author of the pull request and determine their user type. + 3. If the author is not a trusted user (neither a Core Contributor nor a Googler): + a. Trigger cloud builds with specific substitutions for the PR. + 4. For all pull requests, the 'awaiting-approval' label is removed. 
+ `, + Run: func(cmd *cobra.Command, args []string) { + prNumber := args[0] + fmt.Println("PR Number: ", prNumber) + + commitSha := args[1] + fmt.Println("Commit SHA: ", commitSha) + + branchName := args[2] + fmt.Println("Branch Name: ", branchName) + + headRepoUrl := args[3] + fmt.Println("Head Repo URL: ", headRepoUrl) + + headBranch := args[4] + fmt.Println("Head Branch: ", headBranch) + + baseBranch := args[5] + fmt.Println("Base Branch: ", baseBranch) + + gh := github.NewGithubService() + cb := cloudbuild.NewCloudBuildService() + execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch, gh, cb) + }, +} + +func execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh ccGithub, cb ccCloudbuild) { + substitutions := map[string]string{ + "BRANCH_NAME": branchName, + "_PR_NUMBER": prNumber, + "_HEAD_REPO_URL": headRepoUrl, + "_HEAD_BRANCH": headBranch, + "_BASE_BRANCH": baseBranch, + } + + author, err := gh.GetPullRequestAuthor(prNumber) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + authorUserType := gh.GetUserType(author) + trusted := authorUserType == github.CoreContributorUserType || authorUserType == github.GooglerUserType + + // only triggers build for untrusted users (because trusted users will be handled by membership-checker) + if !trusted { + err = cb.TriggerMMPresubmitRuns(commitSha, substitutions) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } + + // in community-checker job: + // remove awaiting-approval label from external contributor PRs + gh.RemoveLabel(prNumber, "awaiting-approval") +} + +func init() { + rootCmd.AddCommand(communityApprovalCmd) +} diff --git a/.ci/magician/cmd/community_checker_test.go b/.ci/magician/cmd/community_checker_test.go new file mode 100644 index 000000000000..dec9682e50a2 --- /dev/null +++ b/.ci/magician/cmd/community_checker_test.go @@ -0,0 +1,91 @@ +package cmd + +import ( + "magician/github" + "reflect" + "testing" +) + 
+func TestExecCommunityChecker_CoreContributorFlow(t *testing.T) { + gh := &mockGithub{ + author: "core_author", + userType: github.CoreContributorUserType, + calledMethods: make(map[string][][]any), + } + cb := &mockCloudBuild{ + calledMethods: make(map[string][][]any), + } + + execCommunityChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) + + if _, ok := cb.calledMethods["TriggerMMPresubmitRuns"]; ok { + t.Fatal("Presubmit runs redundantly triggered for core contributor") + } + + method := "RemoveLabel" + expected := [][]any{{"pr1", "awaiting-approval"}} + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("awaiting-approval label not removed for PR ") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } + +} + +func TestExecCommunityChecker_GooglerFlow(t *testing.T) { + gh := &mockGithub{ + author: "googler_author", + userType: github.GooglerUserType, + calledMethods: make(map[string][][]any), + firstReviewer: "reviewer1", + previousReviewers: []string{github.GetRandomReviewer(), "reviewer3"}, + } + cb := &mockCloudBuild{ + calledMethods: make(map[string][][]any), + } + + execCommunityChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) + + if _, ok := cb.calledMethods["TriggerMMPresubmitRuns"]; ok { + t.Fatal("Presubmit runs redundantly triggered for googler") + } + + method := "RemoveLabel" + expected := [][]any{{"pr1", "awaiting-approval"}} + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("awaiting-approval label not removed for PR ") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } +} + +func TestExecCommunityChecker_AmbiguousUserFlow(t *testing.T) { + gh := &mockGithub{ + author: "ambiguous_author", + userType: github.CommunityUserType, + calledMethods: make(map[string][][]any), + firstReviewer: github.GetRandomReviewer(), + previousReviewers: 
[]string{github.GetRandomReviewer(), "reviewer3"}, + } + cb := &mockCloudBuild{ + calledMethods: make(map[string][][]any), + } + + execCommunityChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) + + method := "TriggerMMPresubmitRuns" + expected := [][]any{{"sha1", map[string]string{"BRANCH_NAME": "branch1", "_BASE_BRANCH": "base1", "_HEAD_BRANCH": "head1", "_HEAD_REPO_URL": "url1", "_PR_NUMBER": "pr1"}}} + if calls, ok := cb.calledMethods[method]; !ok { + t.Fatal("Presubmit runs not triggered for ambiguous user") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } + + method = "RemoveLabel" + expected = [][]any{{"pr1", "awaiting-approval"}} + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("awaiting-approval label not removed for PR ") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } +} diff --git a/.ci/magician/cmd/generate_comment.go b/.ci/magician/cmd/generate_comment.go new file mode 100644 index 000000000000..c38b68009189 --- /dev/null +++ b/.ci/magician/cmd/generate_comment.go @@ -0,0 +1,462 @@ +package cmd + +import ( + "fmt" + "magician/exec" + "magician/github" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/spf13/cobra" +) + +const allowBreakingChangesLabel = 4598495472 + +type gcGithub interface { + GetPullRequestLabelIDs(prNumber string) (map[int]struct{}, error) + PostBuildStatus(prNumber, title, state, targetURL, commitSha string) error + PostComment(prNumber, comment string) error +} + +type gcRunner interface { + GetCWD() string + Copy(src, dest string) error + RemoveAll(path string) error + PushDir(path string) error + PopDir() error + Run(name string, args, env []string) (string, error) + MustRun(name string, args, env []string) string +} + +type ProviderVersion string + +type Repository struct { + Name string // Name in GitHub (e.g. 
magic-modules) + Title string // Title for display (e.g. Magic Modules) + Path string // local Path once checked out, including Name + Version ProviderVersion + DiffCanFail bool // whether to allow the command to continue if cloning or diffing the repo fails +} + +var generateCommentCmd = &cobra.Command{ + Use: "generate-comment", + Short: "Run presubmit generate comment", + Long: `This command processes pull requests and performs various validations and actions based on the PR's metadata and author. + + The following PR details are expected as environment variables: + 1. BUILD_ID + 2. PROJECT_ID + 3. BUILD_STEP + 4. COMMIT_SHA + 5. PR_NUMBER + 6. GITHUB_TOKEN + + The command performs the following steps: + 1. Clone the tpg, tpgb, tfc, and tfoics repos from modular-magician. + 2. Compute the diffs between auto-pr-# and auto-pr-#-old branches. + 3. Run the diff processor to detect breaking changes. + 4. Run the missing test detector to detect missing tests for fields changed. + 5. Report the results in a PR comment. + 6. Run unit tests for the missing test detector. 
+ `, + Run: func(cmd *cobra.Command, args []string) { + buildID := os.Getenv("BUILD_ID") + fmt.Println("Build ID: ", buildID) + + projectID := os.Getenv("PROJECT_ID") + fmt.Println("Project ID: ", projectID) + + buildStep := os.Getenv("BUILD_STEP") + fmt.Println("Build Step: ", buildStep) + + commit := os.Getenv("COMMIT_SHA") + fmt.Println("Commit SHA: ", commit) + + pr := os.Getenv("PR_NUMBER") + fmt.Println("PR Number: ", pr) + + githubToken, ok := os.LookupEnv("GITHUB_TOKEN") + if !ok { + fmt.Println("Did not provide GITHUB_TOKEN environment variable") + os.Exit(1) + } + + gh := github.NewGithubService() + rnr, err := exec.NewRunner() + if err != nil { + fmt.Println("Error creating a runner: ", err) + os.Exit(1) + } + execGenerateComment(buildID, projectID, buildStep, commit, pr, githubToken, gh, rnr) + }, +} + +func execGenerateComment(buildID, projectID, buildStep, commit, pr, githubToken string, gh gcGithub, r gcRunner) { + newBranch := "auto-pr-" + pr + oldBranch := "auto-pr-" + pr + "-old" + wd := r.GetCWD() + mmLocalPath := filepath.Join(wd, "..", "..") + tpgRepoName := "terraform-provider-google" + tpgLocalPath := filepath.Join(mmLocalPath, "..", "tpg") + tpgbRepoName := "terraform-provider-google-beta" + tpgbLocalPath := filepath.Join(mmLocalPath, "..", "tpgb") + tfoicsRepoName := "docs-examples" + tfoicsLocalPath := filepath.Join(mmLocalPath, "..", "tfoics") + // For backwards compatibility until at least Nov 15 2021 + tfcRepoName := "terraform-google-conversion" + tfcLocalPath := filepath.Join(mmLocalPath, "..", "tfc") + + var diffs string + for _, repo := range []Repository{ + { + Name: tpgRepoName, + Title: "Terraform GA", + Path: tpgLocalPath, + }, + { + Name: tpgbRepoName, + Title: "Terraform Beta", + Path: tpgbLocalPath, + }, + { + Name: tfcRepoName, + Title: "TF Conversion", + Path: tfcLocalPath, + DiffCanFail: true, + }, + { + Name: tfoicsRepoName, + Title: "TF OiCS", + Path: tfoicsLocalPath, + }, + } { + // TPG/TPGB difference + repoDiffs, err 
:= cloneAndDiff(repo, oldBranch, newBranch, githubToken, r) + if err != nil { + fmt.Printf("Error cloning and diffing tpg repo: %v\n", err) + if !repo.DiffCanFail { + os.Exit(1) + } + } + diffs += "\n" + repoDiffs + } + + var showBreakingChangesFailed bool + var err error + diffProcessorPath := filepath.Join(mmLocalPath, "tools", "diff-processor") + // versionedBreakingChanges is a map of breaking change output by provider version. + versionedBreakingChanges := make(map[ProviderVersion]string, 2) + + for _, repo := range []Repository{ + { + Title: "TPG", + Path: tpgLocalPath, + Version: "ga", + }, + { + Title: "TPGB", + Path: tpgbLocalPath, + Version: "beta", + }, + } { + // TPG diff processor + err = buildDiffProcessor(diffProcessorPath, repo.Path, oldBranch, newBranch, r) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + output, err := computeBreakingChanges(diffProcessorPath, r) + if err != nil { + fmt.Println("Error computing TPG breaking changes: ", err) + showBreakingChangesFailed = true + } + versionedBreakingChanges[repo.Version] = output + err = addLabels(diffProcessorPath, githubToken, pr, r) + if err != nil { + fmt.Println("Error adding TPG labels to PR: ", err) + } + err = cleanDiffProcessor(diffProcessorPath, r) + if err != nil { + fmt.Println("Error cleaning up diff processor: ", err) + os.Exit(1) + } + } + + var breakingChanges string + if showBreakingChangesFailed { + breakingChanges = `## Breaking Change Detection Failed +The breaking change detector crashed during execution. This is usually due to the downstream provider(s) failing to compile. 
Please investigate or follow up with your reviewer.` + } else { + breakingChanges = combineBreakingChanges(versionedBreakingChanges["ga"], versionedBreakingChanges["beta"]) + } + + // Missing test detector + missingTests, err := detectMissingTests(mmLocalPath, tpgbLocalPath, oldBranch, r) + if err != nil { + fmt.Println("Error setting up missing test detector: ", err) + os.Exit(1) + } + + message := "Hi there, I'm the Modular magician. I've detected the following information about your changes:\n\n" + breakingState := "success" + if breakingChanges != "" { + message += breakingChanges + "\n\n" + + labels, err := gh.GetPullRequestLabelIDs(pr) + if err != nil { + fmt.Printf("Error getting pull request labels: %v\n", err) + os.Exit(1) + } + if _, ok := labels[allowBreakingChangesLabel]; !ok { + breakingState = "failure" + } + } + + if diffs == "" { + message += "## Diff report\nYour PR hasn't generated any diffs, but I'll let you know if a future commit does." + } else { + message += "## Diff report\nYour PR generated some diffs in downstreams - here they are.\n" + diffs + if missingTests != "" { + message += "\n" + missingTests + "\n" + } + } + + if err := gh.PostComment(pr, message); err != nil { + fmt.Printf("Error posting comment to PR %s: %v\n", pr, err) + } + + targetURL := fmt.Sprintf("https://console.cloud.google.com/cloud-build/builds;region=global/%s;step=%s?project=%s", buildID, buildStep, projectID) + if err := gh.PostBuildStatus(pr, "terraform-provider-breaking-change-test", breakingState, targetURL, commit); err != nil { + fmt.Printf("Error posting build status for pr %s commit %s: %v\n", pr, commit, err) + os.Exit(1) + } + + if err := r.PushDir(mmLocalPath); err != nil { + fmt.Println(err) + os.Exit(1) + } + if diffs := r.MustRun("git", []string{"diff", "HEAD", "origin/main", "tools/missing-test-detector"}, nil); diffs != "" { + fmt.Printf("Found diffs in missing test detector:\n%s\nRunning tests.\n", diffs) + if err := testTools(mmLocalPath, 
tpgbLocalPath, pr, commit, buildID, buildStep, projectID, gh, r); err != nil { + fmt.Printf("Error testing tools in %s: %v\n", mmLocalPath, err) + os.Exit(1) + } + } + if err := r.PopDir(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func cloneAndDiff(repo Repository, oldBranch, newBranch, githubToken string, r gcRunner) (string, error) { + // Clone the repo to the desired repo.Path. + url := fmt.Sprintf("https://modular-magician:%s@github.com/modular-magician/%s", githubToken, repo.Name) + if _, err := r.Run("git", []string{"clone", "-b", newBranch, url, repo.Path}, nil); err != nil { + return "", fmt.Errorf("error cloning %s: %v\n", repo.Name, err) + } + + // Push dir to the newly cloned repo. + if err := r.PushDir(repo.Path); err != nil { + return "", err + } + if _, err := r.Run("git", []string{"fetch", "origin", oldBranch}, nil); err != nil { + return "", fmt.Errorf("error fetching branch %s in repo %s: %v\n", oldBranch, repo.Name, err) + } + + // Return summary, if any, and return to original directory. 
+ if summary, err := r.Run("git", []string{"diff", "origin/" + oldBranch, "origin/" + newBranch, "--shortstat"}, nil); err != nil { + return "", fmt.Errorf("error diffing %s and %s: %v\n", oldBranch, newBranch, err) + } else if summary != "" { + summary = strings.TrimSuffix(summary, "\n") + return fmt.Sprintf("%s: [Diff](https://github.com/modular-magician/%s/compare/%s..%s) (%s)", repo.Title, repo.Name, oldBranch, newBranch, summary), r.PopDir() + } + return "", r.PopDir() +} + +// Build the diff processor for tpg or tpgb +func buildDiffProcessor(diffProcessorPath, providerLocalPath, oldBranch, newBranch string, r gcRunner) error { + if err := r.PushDir(diffProcessorPath); err != nil { + return err + } + for _, path := range []string{"old", "new"} { + if err := r.Copy(providerLocalPath, filepath.Join(diffProcessorPath, path)); err != nil { + return err + } + } + if _, err := r.Run("make", []string{"build"}, []string{"OLD_REF=" + oldBranch, "NEW_REF=" + newBranch}); err != nil { + return fmt.Errorf("Error running make build in %s: %v\n", diffProcessorPath, err) + } + return r.PopDir() +} + +func computeBreakingChanges(diffProcessorPath string, r gcRunner) (string, error) { + if err := r.PushDir(diffProcessorPath); err != nil { + return "", err + } + breakingChanges, err := r.Run("bin/diff-processor", []string{"breaking-changes"}, nil) + if err != nil { + return "", err + } + return breakingChanges, r.PopDir() +} + +func addLabels(diffProcessorPath, githubToken, pr string, r gcRunner) error { + if err := r.PushDir(diffProcessorPath); err != nil { + return err + } + output, err := r.Run("bin/diff-processor", []string{"add-labels", pr}, []string{fmt.Sprintf("GITHUB_TOKEN=%s", githubToken)}) + fmt.Println(output) + if err != nil { + return err + } + return r.PopDir() +} + +func cleanDiffProcessor(diffProcessorPath string, r gcRunner) error { + for _, path := range []string{"old", "new", "bin"} { + if err := r.RemoveAll(filepath.Join(diffProcessorPath, path)); err != 
nil { + return err + } + } + return nil +} + +// Get the breaking change message including the unique tpg messages and all tpgb messages. +func combineBreakingChanges(tpgBreaking, tpgbBreaking string) string { + var allMessages []string + if tpgBreaking == "" { + if tpgbBreaking == "" { + return "" + } + allMessages = strings.Split(tpgbBreaking, "\n") + } else if tpgbBreaking == "" { + allMessages = strings.Split(tpgBreaking, "\n") + } else { + dashExp := regexp.MustCompile("-.*") + tpgMessages := strings.Split(tpgBreaking, "\n") + tpgbMessages := strings.Split(tpgbBreaking, "\n") + tpgbSet := make(map[string]struct{}, len(tpgbMessages)) + var tpgUnique []string + for _, message := range tpgbMessages { + simple := dashExp.ReplaceAllString(message, "") + tpgbSet[simple] = struct{}{} + } + for _, message := range tpgMessages { + simple := dashExp.ReplaceAllString(message, "") + if _, ok := tpgbSet[simple]; !ok { + tpgUnique = append(tpgUnique, message) + } + } + allMessages = append(tpgUnique, tpgbMessages...) + } + if len(allMessages) > 0 { + return `Breaking Change(s) Detected +The following breaking change(s) were detected within your pull request. + +* ` + strings.Join(allMessages, "\n* ") + ` + +If you believe this detection to be incorrect please raise the concern with your reviewer. +If you intend to make this change you will need to wait for a [major release](https://www.terraform.io/plugin/sdkv2/best-practices/versioning#example-major-number-increments) window. +An ` + "`override-breaking-change`" + `label can be added to allow merging. +` + } + return "" +} + +// Run the missing test detector and return the results. +// Returns an empty string unless there are missing tests. +// Error will be nil unless an error occurs during setup. 
+func detectMissingTests(mmLocalPath, tpgbLocalPath, oldBranch string, r gcRunner) (string, error) { + tpgbLocalPathOld := tpgbLocalPath + "old" + + if err := r.Copy(tpgbLocalPath, tpgbLocalPathOld); err != nil { + return "", err + } + + if err := r.PushDir(tpgbLocalPathOld); err != nil { + return "", err + } + if _, err := r.Run("git", []string{"checkout", "origin/" + oldBranch}, nil); err != nil { + return "", err + } + + if err := updatePackageName("old", tpgbLocalPathOld, r); err != nil { + return "", err + } + if err := updatePackageName("new", tpgbLocalPath, r); err != nil { + return "", err + } + if err := r.PopDir(); err != nil { + return "", err + } + + missingTestDetectorPath := filepath.Join(mmLocalPath, "tools", "missing-test-detector") + if err := r.PushDir(missingTestDetectorPath); err != nil { + return "", err + } + if _, err := r.Run("go", []string{"mod", "edit", "-replace", fmt.Sprintf("google/provider/%s=%s", "new", tpgbLocalPath)}, nil); err != nil { + fmt.Printf("Error running go mod edit: %v\n", err) + } + if _, err := r.Run("go", []string{"mod", "edit", "-replace", fmt.Sprintf("google/provider/%s=%s", "old", tpgbLocalPathOld)}, nil); err != nil { + fmt.Printf("Error running go mod edit: %v\n", err) + } + if _, err := r.Run("go", []string{"mod", "tidy"}, nil); err != nil { + fmt.Printf("Error running go mod tidy: %v\n", err) + } + missingTests, err := r.Run("go", []string{"run", ".", fmt.Sprintf("-services-dir=%s/google-beta/services", tpgbLocalPath)}, nil) + if err != nil { + fmt.Printf("Error running missing test detector: %v\n", err) + missingTests = "" + } else { + fmt.Printf("Successfully ran missing test detector:\n%s\n", missingTests) + } + return missingTests, r.PopDir() +} + +// Update the provider package name to the given name in the given path. +// name should be either "old" or "new". 
+func updatePackageName(name, path string, r gcRunner) error { + oldPackageName := "github.com/hashicorp/terraform-provider-google-beta" + newPackageName := "google/provider/" + name + fmt.Printf("Updating package name in %s from %s to %s\n", path, oldPackageName, newPackageName) + if err := r.PushDir(path); err != nil { + return err + } + if _, err := r.Run("find", []string{".", "-type", "f", "-name", "*.go", "-exec", "sed", "-i.bak", fmt.Sprintf("s~%s~%s~g", oldPackageName, newPackageName), "{}", "+"}, nil); err != nil { + return fmt.Errorf("error running find: %v\n", err) + } + if _, err := r.Run("sed", []string{"-i.bak", fmt.Sprintf("s|%s|%s|g", oldPackageName, newPackageName), "go.mod"}, nil); err != nil { + return fmt.Errorf("error running sed: %v\n", err) + } + if _, err := r.Run("sed", []string{"-i.bak", fmt.Sprintf("s|%s|%s|g", oldPackageName, newPackageName), "go.sum"}, nil); err != nil { + return fmt.Errorf("error running sed: %v\n", err) + } + return r.PopDir() +} + +// Run unit tests for the missing test detector and diff processor. +// Report results using Github API. 
+func testTools(mmLocalPath, tpgbLocalPath, pr, commit, buildID, buildStep, projectID string, gh gcGithub, r gcRunner) error { + missingTestDetectorPath := filepath.Join(mmLocalPath, "tools", "missing-test-detector") + r.PushDir(missingTestDetectorPath) + if _, err := r.Run("go", []string{"mod", "tidy"}, nil); err != nil { + fmt.Printf("error running go mod tidy in %s: %v\n", missingTestDetectorPath, err) + } + servicesDir := filepath.Join(tpgbLocalPath, "google-beta", "services") + state := "success" + if _, err := r.Run("go", []string{"test"}, []string{"SERVICES_DIR=" + servicesDir}); err != nil { + fmt.Printf("error from running go test in %s: %v\n", missingTestDetectorPath, err) + state = "failure" + } + targetURL := fmt.Sprintf("https://console.cloud.google.com/cloud-build/builds;region=global/%s;step=%s?project=%s", buildID, buildStep, projectID) + if err := gh.PostBuildStatus(pr, "unit-tests-missing-test-detector", state, targetURL, commit); err != nil { + return err + } + return r.PopDir() +} + +func init() { + rootCmd.AddCommand(generateCommentCmd) +} diff --git a/.ci/magician/cmd/generate_comment_test.go b/.ci/magician/cmd/generate_comment_test.go new file mode 100644 index 000000000000..b05acfa4f8e3 --- /dev/null +++ b/.ci/magician/cmd/generate_comment_test.go @@ -0,0 +1,92 @@ +package cmd + +import ( + "reflect" + "testing" +) + +func TestExecGenerateComment(t *testing.T) { + mr := NewMockRunner() + gh := &mockGithub{ + calledMethods: make(map[string][][]any), + } + execGenerateComment("build1", "project1", "17", "sha1", "pr1", "*******", gh, mr) + + for method, expectedCalls := range map[string][][]any{ + "Copy": { + {"/mock/dir/tpg", "/mock/dir/magic-modules/tools/diff-processor/old"}, + {"/mock/dir/tpg", "/mock/dir/magic-modules/tools/diff-processor/new"}, + {"/mock/dir/tpgb", "/mock/dir/magic-modules/tools/diff-processor/old"}, + {"/mock/dir/tpgb", "/mock/dir/magic-modules/tools/diff-processor/new"}, + {"/mock/dir/tpgb", "/mock/dir/tpgbold"}, + }, + 
"RemoveAll": { + {"/mock/dir/magic-modules/tools/diff-processor/old"}, + {"/mock/dir/magic-modules/tools/diff-processor/new"}, + {"/mock/dir/magic-modules/tools/diff-processor/bin"}, + {"/mock/dir/magic-modules/tools/diff-processor/old"}, + {"/mock/dir/magic-modules/tools/diff-processor/new"}, + {"/mock/dir/magic-modules/tools/diff-processor/bin"}, + }, + "Run": { + {"/mock/dir/magic-modules/.ci/magician", "git", []string{"clone", "-b", "auto-pr-pr1", "https://modular-magician:*******@github.com/modular-magician/terraform-provider-google", "/mock/dir/tpg"}, []string(nil)}, + {"/mock/dir/tpg", "git", []string{"fetch", "origin", "auto-pr-pr1-old"}, []string(nil)}, + {"/mock/dir/tpg", "git", []string{"diff", "origin/auto-pr-pr1-old", "origin/auto-pr-pr1", "--shortstat"}, []string(nil)}, + {"/mock/dir/magic-modules/.ci/magician", "git", []string{"clone", "-b", "auto-pr-pr1", "https://modular-magician:*******@github.com/modular-magician/terraform-provider-google-beta", "/mock/dir/tpgb"}, []string(nil)}, + {"/mock/dir/tpgb", "git", []string{"fetch", "origin", "auto-pr-pr1-old"}, []string(nil)}, + {"/mock/dir/tpgb", "git", []string{"diff", "origin/auto-pr-pr1-old", "origin/auto-pr-pr1", "--shortstat"}, []string(nil)}, + {"/mock/dir/magic-modules/.ci/magician", "git", []string{"clone", "-b", "auto-pr-pr1", "https://modular-magician:*******@github.com/modular-magician/terraform-google-conversion", "/mock/dir/tfc"}, []string(nil)}, + {"/mock/dir/tfc", "git", []string{"fetch", "origin", "auto-pr-pr1-old"}, []string(nil)}, + {"/mock/dir/tfc", "git", []string{"diff", "origin/auto-pr-pr1-old", "origin/auto-pr-pr1", "--shortstat"}, []string(nil)}, + {"/mock/dir/magic-modules/.ci/magician", "git", []string{"clone", "-b", "auto-pr-pr1", "https://modular-magician:*******@github.com/modular-magician/docs-examples", "/mock/dir/tfoics"}, []string(nil)}, + {"/mock/dir/tfoics", "git", []string{"fetch", "origin", "auto-pr-pr1-old"}, []string(nil)}, + {"/mock/dir/tfoics", "git", 
[]string{"diff", "origin/auto-pr-pr1-old", "origin/auto-pr-pr1", "--shortstat"}, []string(nil)}, + {"/mock/dir/magic-modules/tools/diff-processor", "make", []string{"build"}, []string{"OLD_REF=auto-pr-pr1-old", "NEW_REF=auto-pr-pr1"}}, + {"/mock/dir/magic-modules/tools/diff-processor", "bin/diff-processor", []string{"breaking-changes"}, []string(nil)}, + {"/mock/dir/magic-modules/tools/diff-processor", "bin/diff-processor", []string{"add-labels", "pr1"}, []string{"GITHUB_TOKEN=*******"}}, + {"/mock/dir/magic-modules/tools/diff-processor", "make", []string{"build"}, []string{"OLD_REF=auto-pr-pr1-old", "NEW_REF=auto-pr-pr1"}}, + {"/mock/dir/magic-modules/tools/diff-processor", "bin/diff-processor", []string{"breaking-changes"}, []string(nil)}, + {"/mock/dir/magic-modules/tools/diff-processor", "bin/diff-processor", []string{"add-labels", "pr1"}, []string{"GITHUB_TOKEN=*******"}}, + {"/mock/dir/tpgbold", "git", []string{"checkout", "origin/auto-pr-pr1-old"}, []string(nil)}, + {"/mock/dir/tpgbold", "find", []string{".", "-type", "f", "-name", "*.go", "-exec", "sed", "-i.bak", "s~github.com/hashicorp/terraform-provider-google-beta~google/provider/old~g", "{}", "+"}, []string(nil)}, + {"/mock/dir/tpgbold", "sed", []string{"-i.bak", "s|github.com/hashicorp/terraform-provider-google-beta|google/provider/old|g", "go.mod"}, []string(nil)}, + {"/mock/dir/tpgbold", "sed", []string{"-i.bak", "s|github.com/hashicorp/terraform-provider-google-beta|google/provider/old|g", "go.sum"}, []string(nil)}, + {"/mock/dir/tpgb", "find", []string{".", "-type", "f", "-name", "*.go", "-exec", "sed", "-i.bak", "s~github.com/hashicorp/terraform-provider-google-beta~google/provider/new~g", "{}", "+"}, []string(nil)}, + {"/mock/dir/tpgb", "sed", []string{"-i.bak", "s|github.com/hashicorp/terraform-provider-google-beta|google/provider/new|g", "go.mod"}, []string(nil)}, + {"/mock/dir/tpgb", "sed", []string{"-i.bak", "s|github.com/hashicorp/terraform-provider-google-beta|google/provider/new|g", 
"go.sum"}, []string(nil)}, + {"/mock/dir/magic-modules/tools/missing-test-detector", "go", []string{"mod", "edit", "-replace", "google/provider/new=/mock/dir/tpgb"}, []string(nil)}, + {"/mock/dir/magic-modules/tools/missing-test-detector", "go", []string{"mod", "edit", "-replace", "google/provider/old=/mock/dir/tpgbold"}, []string(nil)}, + {"/mock/dir/magic-modules/tools/missing-test-detector", "go", []string{"mod", "tidy"}, []string(nil)}, + {"/mock/dir/magic-modules/tools/missing-test-detector", "go", []string{"run", ".", "-services-dir=/mock/dir/tpgb/google-beta/services"}, []string(nil)}, + {"/mock/dir/magic-modules", "git", []string{"diff", "HEAD", "origin/main", "tools/missing-test-detector"}, []string(nil)}}, + } { + if actualCalls, ok := mr.calledMethods[method]; !ok { + t.Fatalf("Found no calls for %s", method) + } else if len(actualCalls) != len(expectedCalls) { + t.Fatalf("Unexpected number of calls for %s, got %d, expected %d", method, len(actualCalls), len(expectedCalls)) + } else { + for i, actualParams := range actualCalls { + if expectedParams := expectedCalls[i]; !reflect.DeepEqual(actualParams, expectedParams) { + t.Fatalf("Wrong params for call %d to %s, got %v, expected %v", i, method, actualParams, expectedParams) + } + } + } + } + + for method, expectedCalls := range map[string][][]any{ + "PostBuildStatus": {{"pr1", "terraform-provider-breaking-change-test", "success", "https://console.cloud.google.com/cloud-build/builds;region=global/build1;step=17?project=project1", "sha1"}}, + "PostComment": {{"pr1", "Hi there, I'm the Modular magician. 
I've detected the following information about your changes:\n\n## Diff report\nYour PR generated some diffs in downstreams - here they are.\n\nTerraform GA: [Diff](https://github.com/modular-magician/terraform-provider-google/compare/auto-pr-pr1-old..auto-pr-pr1) ( 2 files changed, 40 insertions(+))\nTerraform Beta: [Diff](https://github.com/modular-magician/terraform-provider-google-beta/compare/auto-pr-pr1-old..auto-pr-pr1) ( 2 files changed, 40 insertions(+))\nTF Conversion: [Diff](https://github.com/modular-magician/terraform-google-conversion/compare/auto-pr-pr1-old..auto-pr-pr1) ( 1 file changed, 10 insertions(+))\n\n## Missing test report\nYour PR includes resource fields which are not covered by any test.\n\nResource: `google_folder_access_approval_settings` (3 total tests)\nPlease add an acceptance test which includes these fields. The test should include the following:\n\n```hcl\nresource \"google_folder_access_approval_settings\" \"primary\" {\n uncovered_field = # value needed\n}\n\n```\n\n"}}, + } { + if actualCalls, ok := gh.calledMethods[method]; !ok { + t.Fatalf("Found no calls for %s", method) + } else if len(actualCalls) != len(expectedCalls) { + t.Fatalf("Unexpected number of calls for %s, got %d, expected %d", method, len(actualCalls), len(expectedCalls)) + } else { + for i, actualParams := range actualCalls { + if expectedParams := expectedCalls[i]; !reflect.DeepEqual(actualParams, expectedParams) { + t.Fatalf("Wrong params for call %d to %s, got %v, expected %v", i, method, actualParams, expectedParams) + } + } + } + } +} diff --git a/.ci/magician/cmd/membership_checker.go b/.ci/magician/cmd/membership_checker.go new file mode 100644 index 000000000000..df837ff81e33 --- /dev/null +++ b/.ci/magician/cmd/membership_checker.go @@ -0,0 +1,176 @@ +/* +Copyright © 2023 NAME HERE +*/ +package cmd + +import ( + "fmt" + "magician/cloudbuild" + "magician/github" + "os" + + "github.com/spf13/cobra" +) + +type mcGithub interface { + 
GetPullRequestAuthor(prNumber string) (string, error) + GetUserType(user string) github.UserType + GetPullRequestRequestedReviewer(prNumber string) (string, error) + GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) + RequestPullRequestReviewer(prNumber string, reviewer string) error + PostComment(prNumber string, comment string) error + AddLabel(prNumber string, label string) error + PostBuildStatus(prNumber string, title string, state string, targetUrl string, commitSha string) error +} + +type mcCloudbuild interface { + ApproveCommunityChecker(prNumber, commitSha string) error + GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) + TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error +} + +// membershipCheckerCmd represents the membershipChecker command +var membershipCheckerCmd = &cobra.Command{ + Use: "membership-checker", + Short: "Assigns reviewers and manages pull request processing based on the author's trust level.", + Long: `This command conducts a series of validations and actions based on the details and authorship of a provided pull request. + + The command expects the following pull request details as arguments: + 1. PR Number + 2. Commit SHA + 3. Branch Name + 4. Head Repo URL + 5. Head Branch + 6. Base Branch + + It then performs the following operations: + 1. Extracts and displays the pull request details. + 2. Fetches the author of the pull request and determines their contribution type. + 3. If the author is not a core contributor: + a. Identifies the initially requested reviewer and those who previously reviewed this PR. + b. Determines and requests reviewers based on the above. + c. Posts comments tailored to the contribution type, the trust level of the contributor, and the primary reviewer. + 4. For trusted authors (Core Contributors and Googlers): + a. Triggers generate-diffs using the provided PR details. + b. Automatically approves the community-checker run. + 5. 
For external or untrusted contributors: + a. Adds the 'awaiting-approval' label. + b. Posts a link prompting approval for the build. + `, + Args: cobra.ExactArgs(6), + Run: func(cmd *cobra.Command, args []string) { + prNumber := args[0] + fmt.Println("PR Number: ", prNumber) + + commitSha := args[1] + fmt.Println("Commit SHA: ", commitSha) + + branchName := args[2] + fmt.Println("Branch Name: ", branchName) + + headRepoUrl := args[3] + fmt.Println("Head Repo URL: ", headRepoUrl) + + headBranch := args[4] + fmt.Println("Head Branch: ", headBranch) + + baseBranch := args[5] + fmt.Println("Base Branch: ", baseBranch) + + gh := github.NewGithubService() + cb := cloudbuild.NewCloudBuildService() + execMembershipChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch, gh, cb) + }, +} + +func execMembershipChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh mcGithub, cb mcCloudbuild) { + substitutions := map[string]string{ + "BRANCH_NAME": branchName, + "_PR_NUMBER": prNumber, + "_HEAD_REPO_URL": headRepoUrl, + "_HEAD_BRANCH": headBranch, + "_BASE_BRANCH": baseBranch, + } + + author, err := gh.GetPullRequestAuthor(prNumber) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + authorUserType := gh.GetUserType(author) + trusted := authorUserType == github.CoreContributorUserType || authorUserType == github.GooglerUserType + + if authorUserType != github.CoreContributorUserType { + fmt.Println("Not core contributor - assigning reviewer") + + firstRequestedReviewer, err := gh.GetPullRequestRequestedReviewer(prNumber) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + previouslyInvolvedReviewers, err := gh.GetPullRequestPreviousAssignedReviewers(prNumber) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + reviewersToRequest, newPrimaryReviewer := github.ChooseReviewers(firstRequestedReviewer, previouslyInvolvedReviewers) + + for _, reviewer := range reviewersToRequest { + err = 
gh.RequestPullRequestReviewer(prNumber, reviewer) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } + + if newPrimaryReviewer != "" { + comment := github.FormatReviewerComment(newPrimaryReviewer, authorUserType, trusted) + err = gh.PostComment(prNumber, comment) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } + } + + // auto_run(contributor-membership-checker) will be run on every commit or /gcbrun: + // only triggers builds for trusted users + if trusted { + err = cb.TriggerMMPresubmitRuns(commitSha, substitutions) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } + + // in contributor-membership-checker job: + // 1. auto approve community-checker run for trusted users + // 2. add awaiting-approval label to external contributor PRs + if trusted { + err = cb.ApproveCommunityChecker(prNumber, commitSha) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } else { + gh.AddLabel(prNumber, "awaiting-approval") + targetUrl, err := cb.GetAwaitingApprovalBuildLink(prNumber, commitSha) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + err = gh.PostBuildStatus(prNumber, "Approve Build", "success", targetUrl, commitSha) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } +} + +func init() { + rootCmd.AddCommand(membershipCheckerCmd) +} diff --git a/.ci/magician/cmd/membership_checker_test.go b/.ci/magician/cmd/membership_checker_test.go new file mode 100644 index 000000000000..b79f3fd13699 --- /dev/null +++ b/.ci/magician/cmd/membership_checker_test.go @@ -0,0 +1,182 @@ +package cmd + +import ( + "magician/github" + "reflect" + "regexp" + "testing" +) + +func TestExecMembershipChecker_CoreContributorFlow(t *testing.T) { + gh := &mockGithub{ + author: "core_author", + userType: github.CoreContributorUserType, + calledMethods: make(map[string][][]any), + } + cb := &mockCloudBuild{ + calledMethods: make(map[string][][]any), + } + + execMembershipChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) + + if _, ok := 
gh.calledMethods["RequestPullRequestReviewer"]; ok { + t.Fatal("Incorrectly requested review for core contributor") + } + + method := "TriggerMMPresubmitRuns" + expected := [][]any{{"sha1", map[string]string{"BRANCH_NAME": "branch1", "_BASE_BRANCH": "base1", "_HEAD_BRANCH": "head1", "_HEAD_REPO_URL": "url1", "_PR_NUMBER": "pr1"}}} + if calls, ok := cb.calledMethods[method]; !ok { + t.Fatal("Presubmit runs not triggered for core author") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } + + method = "ApproveCommunityChecker" + expected = [][]any{{"pr1", "sha1"}} + if calls, ok := cb.calledMethods[method]; !ok { + t.Fatal("Community checker not approved for core author") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } + +} + +func TestExecMembershipChecker_GooglerFlow(t *testing.T) { + gh := &mockGithub{ + author: "googler_author", + userType: github.GooglerUserType, + calledMethods: make(map[string][][]any), + firstReviewer: "reviewer1", + previousReviewers: []string{github.GetRandomReviewer(), "reviewer3"}, + } + cb := &mockCloudBuild{ + calledMethods: make(map[string][][]any), + } + + execMembershipChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) + + method := "RequestPullRequestReviewer" + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("Review wasn't requested for googler") + } else if len(calls) != 1 { + t.Fatalf("Wrong number of calls for %s, got %d, expected 1", method, len(calls)) + } else if params := calls[0]; len(params) != 2 { + t.Fatalf("Wrong number of params for %s, got %d, expected 2", method, len(params)) + } else if param := params[0]; param != "pr1" { + t.Fatalf("Wrong first param for %s, got %v, expected pr1", method, param) + } else if param := params[1]; !github.IsTeamReviewer(param.(string)) { + t.Fatalf("Wrong second param for %s, got %v, expected a 
team reviewer", method, param) + } + + method = "TriggerMMPresubmitRuns" + expected := [][]any{{"sha1", map[string]string{"BRANCH_NAME": "branch1", "_BASE_BRANCH": "base1", "_HEAD_BRANCH": "head1", "_HEAD_REPO_URL": "url1", "_PR_NUMBER": "pr1"}}} + if calls, ok := cb.calledMethods[method]; !ok { + t.Fatal("Presubmit runs not triggered for googler") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } + + method = "ApproveCommunityChecker" + expected = [][]any{{"pr1", "sha1"}} + if calls, ok := cb.calledMethods[method]; !ok { + t.Fatal("Community checker not approved for googler") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } +} + +func TestExecMembershipChecker_AmbiguousUserFlow(t *testing.T) { + gh := &mockGithub{ + author: "ambiguous_author", + userType: github.CommunityUserType, + calledMethods: make(map[string][][]any), + firstReviewer: github.GetRandomReviewer(), + previousReviewers: []string{github.GetRandomReviewer(), "reviewer3"}, + } + cb := &mockCloudBuild{ + calledMethods: make(map[string][][]any), + } + + execMembershipChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) + + method := "RequestPullRequestReviewer" + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("Review wasn't requested for ambiguous user") + } else if len(calls) != 1 { + t.Fatalf("Wrong number of calls for %s, got %d, expected 1", method, len(calls)) + } else if params := calls[0]; len(params) != 2 { + t.Fatalf("Wrong number of params for %s, got %d, expected 2", method, len(params)) + } else if param := params[0]; param != "pr1" { + t.Fatalf("Wrong first param for %s, got %v, expected pr1", method, param) + } else if param := params[1]; !github.IsTeamReviewer(param.(string)) { + t.Fatalf("Wrong second param for %s, got %v, expected a team reviewer", method, param) + } + + method = "AddLabel" + 
expected := [][]any{{"pr1", "awaiting-approval"}}
+	if calls, ok := gh.calledMethods[method]; !ok {
+		t.Fatal("Label wasn't posted to pull request")
+	} else if !reflect.DeepEqual(calls, expected) {
+		t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected)
+	}
+
+	method = "GetAwaitingApprovalBuildLink"
+	expected = [][]any{{"pr1", "sha1"}}
+	if calls, ok := cb.calledMethods[method]; !ok {
+		t.Fatal("Awaiting approval build link wasn't gotten from pull request")
+	} else if !reflect.DeepEqual(calls, expected) {
+		t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected)
+	}
+
+	// BUGFIX: ApproveCommunityChecker and TriggerMMPresubmitRuns are recorded on
+	// the mockCloudBuild, not the mockGithub; checking gh.calledMethods made
+	// both of these negative assertions vacuously pass.
+	if _, ok := cb.calledMethods["ApproveCommunityChecker"]; ok {
+		t.Fatal("Incorrectly approved community checker for ambiguous user")
+	}
+
+	if _, ok := cb.calledMethods["TriggerMMPresubmitRuns"]; ok {
+		t.Fatal("Incorrectly triggered presubmit runs for ambiguous user")
+	}
+}
+
+func TestExecMembershipChecker_CommentForNewPrimaryReviewer(t *testing.T) {
+	gh := &mockGithub{
+		author:            "googler_author",
+		userType:          github.GooglerUserType,
+		calledMethods:     make(map[string][][]any),
+		firstReviewer:     "",
+		previousReviewers: []string{"reviewer3"},
+	}
+	cb := &mockCloudBuild{
+		calledMethods: make(map[string][][]any),
+	}
+
+	execMembershipChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb)
+
+	method := "RequestPullRequestReviewer"
+	if calls, ok := gh.calledMethods[method]; !ok {
+		t.Fatal("Review wasn't requested for googler")
+	} else if len(calls) != 1 {
+		t.Fatalf("Wrong number of calls for %s, got %d, expected 1", method, len(calls))
+	} else if params := calls[0]; len(params) != 2 {
+		t.Fatalf("Wrong number of params for %s, got %d, expected 2", method, len(params))
+	} else if param := params[0]; param != "pr1" {
+		t.Fatalf("Wrong first param for %s, got %v, expected pr1", method, param)
+	} else if param := params[1]; !github.IsTeamReviewer(param.(string)) {
+		t.Fatalf("Wrong second param for %s, got %v, expected a team reviewer", 
method, param) + } + + method = "PostComment" + reviewerExp := regexp.MustCompile(`@(.*?),`) + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("Comment wasn't posted stating user status") + } else if len(calls) != 1 { + t.Fatalf("Wrong number of calls for %s, got %d, expected 1", method, len(calls)) + } else if params := calls[0]; len(params) != 2 { + t.Fatalf("Wrong number of params for %s, got %d, expected 2", method, len(params)) + } else if param := params[0]; param != "pr1" { + t.Fatalf("Wrong first param for %s, got %v, expected pr1", method, param) + } else if param, ok := params[1].(string); !ok { + t.Fatalf("Got non-string second param for %s", method) + } else if submatches := reviewerExp.FindStringSubmatch(param); len(submatches) != 2 || !github.IsTeamReviewer(submatches[1]) { + t.Fatalf("%s called without a team reviewer (found %v) in the comment: %s", method, submatches, param) + } +} diff --git a/.ci/magician/cmd/mock_cloudbuild_test.go b/.ci/magician/cmd/mock_cloudbuild_test.go new file mode 100644 index 000000000000..f4da97a06771 --- /dev/null +++ b/.ci/magician/cmd/mock_cloudbuild_test.go @@ -0,0 +1,20 @@ +package cmd + +type mockCloudBuild struct { + calledMethods map[string][][]any +} + +func (m *mockCloudBuild) ApproveCommunityChecker(prNumber, commitSha string) error { + m.calledMethods["ApproveCommunityChecker"] = append(m.calledMethods["ApproveCommunityChecker"], []any{prNumber, commitSha}) + return nil +} + +func (m *mockCloudBuild) GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) { + m.calledMethods["GetAwaitingApprovalBuildLink"] = append(m.calledMethods["GetAwaitingApprovalBuildLink"], []any{prNumber, commitSha}) + return "mocked_url", nil +} + +func (m *mockCloudBuild) TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error { + m.calledMethods["TriggerMMPresubmitRuns"] = append(m.calledMethods["TriggerMMPresubmitRuns"], []any{commitSha, substitutions}) + return nil +} diff --git 
a/.ci/magician/cmd/mock_github_test.go b/.ci/magician/cmd/mock_github_test.go new file mode 100644 index 000000000000..dbd20f582a32 --- /dev/null +++ b/.ci/magician/cmd/mock_github_test.go @@ -0,0 +1,66 @@ +package cmd + +import "magician/github" + +type mockGithub struct { + author string + userType github.UserType + firstReviewer string + previousReviewers []string + calledMethods map[string][][]any +} + +func (m *mockGithub) GetPullRequestAuthor(prNumber string) (string, error) { + m.calledMethods["GetPullRequestAuthor"] = append(m.calledMethods["GetPullRequestAuthor"], []any{prNumber}) + return m.author, nil +} + +func (m *mockGithub) GetUserType(user string) github.UserType { + m.calledMethods["GetUserType"] = append(m.calledMethods["GetUserType"], []any{user}) + return m.userType +} + +func (m *mockGithub) GetPullRequestRequestedReviewer(prNumber string) (string, error) { + m.calledMethods["GetPullRequestRequestedReviewer"] = append(m.calledMethods["GetPullRequestRequestedReviewer"], []any{prNumber}) + return m.firstReviewer, nil +} + +func (m *mockGithub) GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) { + m.calledMethods["GetPullRequestPreviousAssignedReviewers"] = append(m.calledMethods["GetPullRequestPreviousAssignedReviewers"], []any{prNumber}) + return m.previousReviewers, nil +} + +func (m *mockGithub) RequestPullRequestReviewer(prNumber string, reviewer string) error { + m.calledMethods["RequestPullRequestReviewer"] = append(m.calledMethods["RequestPullRequestReviewer"], []any{prNumber, reviewer}) + return nil +} + +func (m *mockGithub) PostComment(prNumber string, comment string) error { + m.calledMethods["PostComment"] = append(m.calledMethods["PostComment"], []any{prNumber, comment}) + return nil +} + +func (m *mockGithub) AddLabel(prNumber string, label string) error { + m.calledMethods["AddLabel"] = append(m.calledMethods["AddLabel"], []any{prNumber, label}) + return nil +} + +func (m *mockGithub) RemoveLabel(prNumber 
string, label string) error { + m.calledMethods["RemoveLabel"] = append(m.calledMethods["RemoveLabel"], []any{prNumber, label}) + return nil +} + +func (m *mockGithub) PostBuildStatus(prNumber string, title string, state string, targetUrl string, commitSha string) error { + m.calledMethods["PostBuildStatus"] = append(m.calledMethods["PostBuildStatus"], []any{prNumber, title, state, targetUrl, commitSha}) + return nil +} + +func (m *mockGithub) CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error { + m.calledMethods["CreateWorkflowDispatchEvent"] = append(m.calledMethods["CreateWorkflowDispatchEvent"], []any{workflowFileName, inputs}) + return nil +} + +func (m *mockGithub) GetPullRequestLabelIDs(prNumber string) (map[int]struct{}, error) { + m.calledMethods["GetPullRequestLabelIDs"] = append(m.calledMethods["GetPullRequestLabelIDs"], []any{prNumber}) + return nil, nil +} diff --git a/.ci/magician/cmd/mock_runner_test.go b/.ci/magician/cmd/mock_runner_test.go new file mode 100644 index 000000000000..16867dca123a --- /dev/null +++ b/.ci/magician/cmd/mock_runner_test.go @@ -0,0 +1,105 @@ +package cmd + +import ( + "container/list" + "errors" + "fmt" + "log" +) + +type mockRunner struct { + calledMethods map[string][][]any + cmdResults map[string]string + cwd string + dirStack *list.List +} + +func NewMockRunner() *mockRunner { + return &mockRunner{ + calledMethods: make(map[string][][]any), + cmdResults: map[string]string{ + "/mock/dir/tfc git [clone -b auto-pr-pr1 https://modular-magician:*******@github.com/modular-magician/docs-examples /mock/dir/tfoics] []": "", + "/mock/dir/tpgb git [clone -b auto-pr-pr1 https://modular-magician:*******@github.com/modular-magician/terraform-google-conversion /mock/dir/tfc] []": "", + " git [clone -b auto-pr-pr1 https://modular-magician:*******@github.com/modular-magician/terraform-provider-google /mock/dir/tpg] []": "", + "/mock/dir/tpg git [clone -b auto-pr-pr1 
https://modular-magician:*******@github.com/modular-magician/terraform-provider-google-beta /mock/dir/tpgb] []": "", + "/mock/dir/magic-modules git [diff HEAD origin/main tools/missing-test-detector] []": "", + "/mock/dir/magic-modules/tools/diff-processor bin/diff-processor [breaking-changes] []": "", + "/mock/dir/magic-modules/tools/diff-processor make [build] [OLD_REF=auto-pr-pr1-old NEW_REF=auto-pr-pr1]": "", + "/mock/dir/magic-modules/tools/missing-test-detector go [mod edit -replace google/provider/new=/mock/dir/tpgb] []": "", + "/mock/dir/magic-modules/tools/missing-test-detector go [mod edit -replace google/provider/old=/mock/dir/tpgbold] []": "", + "/mock/dir/magic-modules/tools/missing-test-detector go [mod tidy] []": "", + "/mock/dir/magic-modules/tools/missing-test-detector go [run . -services-dir=/mock/dir/tpgb/google-beta/services] []": "## Missing test report\nYour PR includes resource fields which are not covered by any test.\n\nResource: `google_folder_access_approval_settings` (3 total tests)\nPlease add an acceptance test which includes these fields. The test should include the following:\n\n```hcl\nresource \"google_folder_access_approval_settings\" \"primary\" {\n uncovered_field = # value needed\n}\n\n```\n", + "/mock/dir/tfc git [diff origin/auto-pr-pr1-old origin/auto-pr-pr1 --shortstat] []": " 1 file changed, 10 insertions(+)\n", + "/mock/dir/tfc git [fetch origin auto-pr-pr1-old] []": "", + "/mock/dir/tfoics git [diff origin/auto-pr-pr1-old origin/auto-pr-pr1 --shortstat] []": "", + "/mock/dir/tfoics git [fetch origin auto-pr-pr1-old] []": "", + "/mock/dir/tpg git [diff origin/auto-pr-pr1-old origin/auto-pr-pr1 --shortstat] []": " 2 files changed, 40 insertions(+)\n", + "/mock/dir/tpg git [fetch origin auto-pr-pr1-old] []": "", + "/mock/dir/tpgb find [. 
-type f -name *.go -exec sed -i.bak s~github.com/hashicorp/terraform-provider-google-beta~google/provider/new~g {} +] []": "", + "/mock/dir/tpgb git [diff origin/auto-pr-pr1-old origin/auto-pr-pr1 --shortstat] []": " 2 files changed, 40 insertions(+)\n", + "/mock/dir/tpgb git [fetch origin auto-pr-pr1-old] []": "", + "/mock/dir/tpgb sed [-i.bak s|github.com/hashicorp/terraform-provider-google-beta|google/provider/new|g go.mod] []": "", + "/mock/dir/tpgb sed [-i.bak s|github.com/hashicorp/terraform-provider-google-beta|google/provider/new|g go.sum] []": "", + "/mock/dir/tpgbold find [. -type f -name *.go -exec sed -i.bak s~github.com/hashicorp/terraform-provider-google-beta~google/provider/old~g {} +] []": "", + "/mock/dir/tpgbold git [checkout origin/auto-pr-pr1-old] []": "", + "/mock/dir/tpgbold sed [-i.bak s|github.com/hashicorp/terraform-provider-google-beta|google/provider/old|g go.mod] []": "", + "/mock/dir/tpgbold sed [-i.bak s|github.com/hashicorp/terraform-provider-google-beta|google/provider/old|g go.sum] []": "", + }, + cwd: "/mock/dir/magic-modules/.ci/magician", + dirStack: list.New(), + } +} + +func (mr *mockRunner) GetCWD() string { + return mr.cwd +} + +func (mr *mockRunner) Copy(src, dest string) error { + mr.calledMethods["Copy"] = append(mr.calledMethods["Copy"], []any{src, dest}) + return nil +} + +func (mr *mockRunner) RemoveAll(path string) error { + mr.calledMethods["RemoveAll"] = append(mr.calledMethods["RemoveAll"], []any{path}) + return nil +} + +func (mr *mockRunner) PushDir(path string) error { + if mr.dirStack == nil { + mr.dirStack = list.New() + } + mr.dirStack.PushBack(mr.cwd) + mr.cwd = path + return nil +} + +func (mr *mockRunner) PopDir() error { + if mr.dirStack == nil { + return errors.New("tried to pop an empty dir stack") + } + backVal := mr.dirStack.Remove(mr.dirStack.Back()) + dir, ok := backVal.(string) + if !ok { + return fmt.Errorf("back value of dir stack was a %T, expected string", backVal) + } + mr.cwd = dir + return 
nil +} + +func (mr *mockRunner) Run(name string, args, env []string) (string, error) { + mr.calledMethods["Run"] = append(mr.calledMethods["Run"], []any{mr.cwd, name, args, env}) + cmd := fmt.Sprintf("%s %s %v %v", mr.cwd, name, args, env) + if result, ok := mr.cmdResults[cmd]; ok { + return result, nil + } + fmt.Printf("unknown command %s\n", cmd) + return "", nil +} + +func (mr *mockRunner) MustRun(name string, args, env []string) string { + out, err := mr.Run(name, args, env) + if err != nil { + log.Fatal(err) + } + return out +} diff --git a/.ci/magician/cmd/root.go b/.ci/magician/cmd/root.go new file mode 100644 index 000000000000..b19c57e6ea24 --- /dev/null +++ b/.ci/magician/cmd/root.go @@ -0,0 +1,45 @@ +/* +Copyright © 2023 NAME HERE +*/ +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" +) + +// rootCmd represents the base command when called without any subcommands + +var rootCmd = &cobra.Command{ + Use: "magician", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + Short: "A brief description of your application", + Long: `A longer description that spans multiple lines and likely contains +examples and usage of using your application. For example: + +Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + // Uncomment the following line if your bare application + // has an action associated with it: + // Run: func(cmd *cobra.Command, args []string) { }, +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() { + err := rootCmd.Execute() + if err != nil { + os.Exit(1) + } +} + +func init() { + // Cobra also supports local flags, which will only run + // when this action is called directly. 
+ rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/.ci/magician/cmd/test_tgc.go b/.ci/magician/cmd/test_tgc.go new file mode 100644 index 000000000000..109cd1fea32b --- /dev/null +++ b/.ci/magician/cmd/test_tgc.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "fmt" + "magician/github" + "os" + + "github.com/spf13/cobra" +) + +var testTGCCmd = &cobra.Command{ + Use: "test-tgc", + Short: "Run tgc unit tests via workflow dispatch", + Long: `This command runs tgc unit tests via workflow dispatch + + The following PR details are expected as environment variables: + 1. COMMIT_SHA + 2. PR_NUMBER + `, + Run: func(cmd *cobra.Command, args []string) { + commit := os.Getenv("COMMIT_SHA") + pr := os.Getenv("PR_NUMBER") + + gh := github.NewGithubService() + + execTestTGC(commit, pr, gh) + }, +} + +func execTestTGC(commit, pr string, gh ttGithub) { + if err := gh.CreateWorkflowDispatchEvent("test-tgc.yml", map[string]any{ + "owner": "modular-magician", + "repo": "terraform-google-conversion", + "branch": "auto-pr-" + pr, + "sha": commit, + }); err != nil { + fmt.Printf("Error creating workflow dispatch event: %v\n", err) + os.Exit(1) + } +} + +func init() { + rootCmd.AddCommand(testTGCCmd) +} diff --git a/.ci/magician/cmd/test_tgc_test.go b/.ci/magician/cmd/test_tgc_test.go new file mode 100644 index 000000000000..f9004ddf0e8a --- /dev/null +++ b/.ci/magician/cmd/test_tgc_test.go @@ -0,0 +1,22 @@ +package cmd + +import ( + "reflect" + "testing" +) + +func TestExecTestTGC(t *testing.T) { + gh := &mockGithub{ + calledMethods: make(map[string][][]any), + } + + execTestTGC("sha1", "pr1", gh) + + method := "CreateWorkflowDispatchEvent" + expected := [][]any{{"test-tgc.yml", map[string]any{"branch": "auto-pr-pr1", "owner": "modular-magician", "repo": "terraform-google-conversion", "sha": "sha1"}}} + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("Workflow dispatch event not created") + } else if !reflect.DeepEqual(calls, expected) { + 
t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } +} diff --git a/.ci/magician/cmd/test_tpg.go b/.ci/magician/cmd/test_tpg.go new file mode 100644 index 000000000000..b5543d18899a --- /dev/null +++ b/.ci/magician/cmd/test_tpg.go @@ -0,0 +1,60 @@ +package cmd + +import ( + "fmt" + "magician/github" + "os" + + "github.com/spf13/cobra" +) + +type ttGithub interface { + CreateWorkflowDispatchEvent(string, map[string]any) error +} + +var testTPGCmd = &cobra.Command{ + Use: "test-tpg", + Short: "Run provider unit tests via workflow dispatch", + Long: `This command runs provider unit tests via workflow dispatch + + The following PR details are expected as environment variables: + 1. VERSION (beta or ga) + 2. COMMIT_SHA + 3. PR_NUMBER + `, + Run: func(cmd *cobra.Command, args []string) { + version := os.Getenv("VERSION") + commit := os.Getenv("COMMIT_SHA") + pr := os.Getenv("PR_NUMBER") + + gh := github.NewGithubService() + + execTestTPG(version, commit, pr, gh) + }, +} + +func execTestTPG(version, commit, pr string, gh ttGithub) { + var repo string + if version == "ga" { + repo = "terraform-provider-google" + } else if version == "beta" { + repo = "terraform-provider-google-beta" + } else { + fmt.Println("invalid version specified") + os.Exit(1) + } + + if err := gh.CreateWorkflowDispatchEvent("test-tpg.yml", map[string]any{ + "owner": "modular-magician", + "repo": repo, + "branch": "auto-pr-" + pr, + "sha": commit, + }); err != nil { + fmt.Printf("Error creating workflow dispatch event: %v\n", err) + os.Exit(1) + } +} + +func init() { + rootCmd.AddCommand(testTPGCmd) +} diff --git a/.ci/magician/cmd/test_tpg_test.go b/.ci/magician/cmd/test_tpg_test.go new file mode 100644 index 000000000000..ad77c7596f21 --- /dev/null +++ b/.ci/magician/cmd/test_tpg_test.go @@ -0,0 +1,34 @@ +package cmd + +import ( + "reflect" + "testing" +) + +func TestExecTestTPG(t *testing.T) { + gh := &mockGithub{ + calledMethods: make(map[string][][]any), + } + + 
execTestTPG("beta", "sha1", "pr1", gh) + + method := "CreateWorkflowDispatchEvent" + expected := [][]any{{"test-tpg.yml", map[string]any{"branch": "auto-pr-pr1", "owner": "modular-magician", "repo": "terraform-provider-google-beta", "sha": "sha1"}}} + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("Workflow dispatch event not created") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } + + gh.calledMethods = make(map[string][][]any) + + execTestTPG("ga", "sha1", "pr1", gh) + + method = "CreateWorkflowDispatchEvent" + expected = [][]any{{"test-tpg.yml", map[string]any{"branch": "auto-pr-pr1", "owner": "modular-magician", "repo": "terraform-provider-google", "sha": "sha1"}}} + if calls, ok := gh.calledMethods[method]; !ok { + t.Fatal("Workflow dispatch event not created") + } else if !reflect.DeepEqual(calls, expected) { + t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) + } +} diff --git a/.ci/magician/exec/runner.go b/.ci/magician/exec/runner.go new file mode 100644 index 000000000000..c9423237b285 --- /dev/null +++ b/.ci/magician/exec/runner.go @@ -0,0 +1,107 @@ +package exec + +import ( + "container/list" + "errors" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + + cp "github.com/otiai10/copy" +) + +type actualRunner struct { + cwd string + dirStack *list.List +} + +type Runner interface { + GetCWD() string + Copy(src, dest string) error + RemoveAll(path string) error + PushDir(path string) error + PopDir() error + WriteFile(name, data string) error + Run(name string, args, env []string) (string, error) + MustRun(name string, args, env []string) string +} + +func NewRunner() (Runner, error) { + wd, err := os.Getwd() + if err != nil { + return nil, err + } + return &actualRunner{ + cwd: wd, + dirStack: list.New(), + }, nil +} + +func (ar *actualRunner) GetCWD() string { + return ar.cwd +} + +func (ar *actualRunner) Copy(src, dest string) 
error { + return cp.Copy(ar.abs(src), ar.abs(dest)) +} + +func (ar *actualRunner) RemoveAll(path string) error { + return os.RemoveAll(ar.abs(path)) +} + +// PushDir changes the directory for the runner to the desired path and saves the previous directory in the stack. +func (ar *actualRunner) PushDir(path string) error { + if ar.dirStack == nil { + return errors.New("attempted to push dir, but stack was nil") + } + ar.dirStack.PushFront(ar.cwd) + ar.cwd = ar.abs(path) + return nil +} + +// PopDir removes the most recently added directory from the stack and changes front to it. +func (ar *actualRunner) PopDir() error { + if ar.dirStack == nil || ar.dirStack.Len() == 0 { + return errors.New("attempted to pop dir, but stack was nil or empty") + } + frontVal := ar.dirStack.Remove(ar.dirStack.Front()) + dir, ok := frontVal.(string) + if !ok { + return fmt.Errorf("last element in dir stack was a %T, expected string", frontVal) + } + ar.cwd = dir + return nil +} + +func (ar *actualRunner) WriteFile(name, data string) error { + return os.WriteFile(ar.abs(name), []byte(data), 0644) +} + +func (ar *actualRunner) Run(name string, args, env []string) (string, error) { + cmd := exec.Command(name, args...) + cmd.Dir = ar.cwd + cmd.Env = append(os.Environ(), env...) 
+	out, err := cmd.Output()
+	if err != nil {
+		// BUGFIX: cmd.Output can fail with errors other than *exec.ExitError
+		// (e.g. *exec.Error when the binary is not found); the previous
+		// unchecked assertion err.(*exec.ExitError) panicked in that case.
+		var stderr []byte
+		if exitErr, ok := err.(*exec.ExitError); ok {
+			stderr = exitErr.Stderr
+		}
+		return string(out), fmt.Errorf("error running %s: %v\nstdout:\n%sstderr:\n%s", name, err, out, stderr)
+	}
+	return string(out), nil
+}
+
+func (ar *actualRunner) MustRun(name string, args, env []string) string {
+	out, err := ar.Run(name, args, env)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return out
+}
+
+func (ar *actualRunner) abs(path string) string {
+	if !filepath.IsAbs(path) {
+		return filepath.Join(ar.cwd, path)
+	}
+	return path
+}
diff --git a/.ci/containers/membership-checker/REVIEWER_ASSIGNMENT_COMMENT.md b/.ci/magician/github/REVIEWER_ASSIGNMENT_COMMENT.md
similarity index 100%
rename from .ci/containers/membership-checker/REVIEWER_ASSIGNMENT_COMMENT.md
rename to .ci/magician/github/REVIEWER_ASSIGNMENT_COMMENT.md
diff --git a/.ci/magician/github/get.go b/.ci/magician/github/get.go
new file mode 100644
index 000000000000..48f3cdab1a3e
--- /dev/null
+++ b/.ci/magician/github/get.go
@@ -0,0 +1,92 @@
+package github
+
+import (
+	"fmt"
+	utils "magician/utility"
+)
+
+func (gh *github) GetPullRequestAuthor(prNumber string) (string, error) {
+	url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s", prNumber)
+
+	var pullRequest struct {
+		User struct {
+			Login string `json:"login"`
+		} `json:"user"`
+	}
+
+	_, err := utils.RequestCall(url, "GET", gh.token, &pullRequest, nil)
+	if err != nil {
+		return "", err
+	}
+
+	return pullRequest.User.Login, nil
+}
+
+func (gh *github) GetPullRequestRequestedReviewer(prNumber string) (string, error) {
+	url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/requested_reviewers", prNumber)
+
+	var requestedReviewers struct {
+		Users []struct {
+			Login string `json:"login"`
+		} `json:"users"`
+	}
+
+	_, err := utils.RequestCall(url, "GET", gh.token, &requestedReviewers, nil)
+	if err != nil {
+		return "", err
+	}
+
+	if requestedReviewers.Users == nil || 
len(requestedReviewers.Users) == 0 {
+		return "", nil
+	}
+
+	return requestedReviewers.Users[0].Login, nil
+}
+
+func (gh *github) GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) {
+	url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/reviews", prNumber)
+
+	var reviews []struct {
+		User struct {
+			Login string `json:"login"`
+		} `json:"user"`
+	}
+
+	_, err := utils.RequestCall(url, "GET", gh.token, &reviews, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	previousAssignedReviewers := map[string]struct{}{}
+	for _, review := range reviews {
+		previousAssignedReviewers[review.User.Login] = struct{}{}
+	}
+
+	result := []string{}
+	for key := range previousAssignedReviewers {
+		result = append(result, key)
+	}
+
+	return result, nil
+}
+
+func (gh *github) GetPullRequestLabelIDs(prNumber string) (map[int]struct{}, error) {
+	// NOTE(review): this hits the pulls/%s/reviews endpoint but decodes label
+	// IDs — confirm this shouldn't be the issue's labels endpoint instead.
+	url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/reviews", prNumber)
+
+	var labels []struct {
+		Label struct {
+			ID int `json:"id"`
+		} `json:"label"`
+	}
+
+	if _, err := utils.RequestCall(url, "GET", gh.token, &labels, nil); err != nil {
+		return nil, err
+	}
+
+	// BUGFIX: result was declared as a nil map (var result map[int]struct{});
+	// writing to a nil map panics on the first label.
+	result := make(map[int]struct{}, len(labels))
+	for _, label := range labels {
+		result[label.Label.ID] = struct{}{}
+	}
+
+	return result, nil
+}
diff --git a/.ci/magician/github/init.go b/.ci/magician/github/init.go
new file mode 100644
index 000000000000..3822a0274c5f
--- /dev/null
+++ b/.ci/magician/github/init.go
@@ -0,0 +1,35 @@
+package github
+
+import (
+	"fmt"
+	"os"
+)
+
+// GithubService represents the service for GitHub interactions. 
+type github struct { + token string +} + +type GithubService interface { + GetPullRequestAuthor(prNumber string) (string, error) + GetPullRequestRequestedReviewer(prNumber string) (string, error) + GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) + GetPullRequestLabelIDs(prNumber string) (map[int]struct{}, error) + GetUserType(user string) UserType + PostBuildStatus(prNumber, title, state, targetURL, commitSha string) error + PostComment(prNumber, comment string) error + RequestPullRequestReviewer(prNumber, assignee string) error + AddLabel(prNumber, label string) error + RemoveLabel(prNumber, label string) error + CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error +} + +func NewGithubService() GithubService { + githubToken, ok := os.LookupEnv("GITHUB_TOKEN") + if !ok { + fmt.Println("Did not provide GITHUB_TOKEN environment variable") + os.Exit(1) + } + + return &github{token: githubToken} +} diff --git a/.ci/containers/membership-checker/membership.go b/.ci/magician/github/membership.go similarity index 57% rename from .ci/containers/membership-checker/membership.go rename to .ci/magician/github/membership.go index 26768ddd30ec..460769bae63c 100644 --- a/.ci/containers/membership-checker/membership.go +++ b/.ci/magician/github/membership.go @@ -1,7 +1,8 @@ -package main +package github import ( "fmt" + utils "magician/utility" "math/rand" "time" @@ -26,70 +27,73 @@ var ( } // This is for new team members who are onboarding - trustedContributors = []string{} + trustedContributors = []string{ + "BBBmau", + } // This is for reviewers who are "on vacation": will not receive new review assignments but will still receive re-requests for assigned PRs. 
onVacationReviewers = []string{ - "slevenick", + "hao-nan-li", + "roaks3", } ) -type userType int64 +type UserType int64 const ( - communityUserType userType = iota - googlerUserType - coreContributorUserType + CommunityUserType UserType = iota + GooglerUserType + CoreContributorUserType ) -func (ut userType) String() string { +func (ut UserType) String() string { switch ut { - case googlerUserType: + case GooglerUserType: return "Googler" - case coreContributorUserType: + case CoreContributorUserType: return "Core Contributor" default: return "Community Contributor" } } -// Check if a user is team member to not request a random reviewer -func isTeamMember(author string) bool { - return slices.Contains(reviewerRotation, author) || slices.Contains(trustedContributors, author) -} - -func isTeamReviewer(reviewer string) bool { - return slices.Contains(reviewerRotation, reviewer) -} - -func getUserType(user, GITHUB_TOKEN string) userType { - if isTeamMember(user) { +func (gh *github) GetUserType(user string) UserType { + if isTeamMember(user, gh.token) { fmt.Println("User is a team member") - return coreContributorUserType + return CoreContributorUserType } - if isOrgMember(user, "GoogleCloudPlatform", GITHUB_TOKEN) { + if isOrgMember(user, "GoogleCloudPlatform", gh.token) { fmt.Println("User is a GCP org member") - return googlerUserType + return GooglerUserType } - if isOrgMember(user, "googlers", GITHUB_TOKEN) { + if isOrgMember(user, "googlers", gh.token) { fmt.Println("User is a googlers org member") - return googlerUserType + return GooglerUserType } - return communityUserType + return CommunityUserType +} + +// Check if a user is team member to not request a random reviewer +func isTeamMember(author, githubToken string) bool { + return slices.Contains(reviewerRotation, author) || slices.Contains(trustedContributors, author) +} + +func IsTeamReviewer(reviewer string) bool { + return slices.Contains(reviewerRotation, reviewer) } -func isOrgMember(author, org, 
GITHUB_TOKEN string) bool { +func isOrgMember(author, org, githubToken string) bool { url := fmt.Sprintf("https://api.github.com/orgs/%s/members/%s", org, author) - res, _ := requestCall(url, "GET", GITHUB_TOKEN, nil, nil) + res, _ := utils.RequestCall(url, "GET", githubToken, nil, nil) return res != 404 } -func getRandomReviewer() string { - availableReviewers := removes(reviewerRotation, onVacationReviewers) +func GetRandomReviewer() string { + availableReviewers := utils.Removes(reviewerRotation, onVacationReviewers) rand.Seed(time.Now().UnixNano()) reviewer := availableReviewers[rand.Intn(len(availableReviewers))] return reviewer diff --git a/.ci/containers/membership-checker/membership_test.go b/.ci/magician/github/membership_test.go similarity index 96% rename from .ci/containers/membership-checker/membership_test.go rename to .ci/magician/github/membership_test.go index 3e3e2b2989dd..1f2264bdf3c2 100644 --- a/.ci/containers/membership-checker/membership_test.go +++ b/.ci/magician/github/membership_test.go @@ -1,4 +1,4 @@ -package main +package github import ( "testing" diff --git a/.ci/magician/github/reviewer_assignment.go b/.ci/magician/github/reviewer_assignment.go new file mode 100644 index 000000000000..b3e968145d9f --- /dev/null +++ b/.ci/magician/github/reviewer_assignment.go @@ -0,0 +1,52 @@ +package github + +import ( + "fmt" + "strings" + "text/template" + + _ "embed" +) + +var ( + //go:embed REVIEWER_ASSIGNMENT_COMMENT.md + reviewerAssignmentComment string +) + +// Returns a list of users to request review from, as well as a new primary reviewer if this is the first run. 
+func ChooseReviewers(firstRequestedReviewer string, previouslyInvolvedReviewers []string) (reviewersToRequest []string, newPrimaryReviewer string) { + hasPrimaryReviewer := false + newPrimaryReviewer = "" + + if firstRequestedReviewer != "" { + hasPrimaryReviewer = true + } + + for _, reviewer := range previouslyInvolvedReviewers { + if IsTeamReviewer(reviewer) { + hasPrimaryReviewer = true + reviewersToRequest = append(reviewersToRequest, reviewer) + } + } + + if !hasPrimaryReviewer { + newPrimaryReviewer = GetRandomReviewer() + reviewersToRequest = append(reviewersToRequest, newPrimaryReviewer) + } + + return reviewersToRequest, newPrimaryReviewer +} + +func FormatReviewerComment(newPrimaryReviewer string, authorUserType UserType, trusted bool) string { + tmpl, err := template.New("REVIEWER_ASSIGNMENT_COMMENT.md").Parse(reviewerAssignmentComment) + if err != nil { + panic(fmt.Sprintf("Unable to parse REVIEWER_ASSIGNMENT_COMMENT.md: %s", err)) + } + sb := new(strings.Builder) + tmpl.Execute(sb, map[string]any{ + "reviewer": newPrimaryReviewer, + "authorUserType": authorUserType.String(), + "trusted": trusted, + }) + return sb.String() +} diff --git a/.ci/containers/membership-checker/reviewer_assignment_test.go b/.ci/magician/github/reviewer_assignment_test.go similarity index 88% rename from .ci/containers/membership-checker/reviewer_assignment_test.go rename to .ci/magician/github/reviewer_assignment_test.go index 4763b650ee1b..b96ac59a7389 100644 --- a/.ci/containers/membership-checker/reviewer_assignment_test.go +++ b/.ci/magician/github/reviewer_assignment_test.go @@ -1,7 +1,8 @@ -package main +package github import ( "fmt" + utils "magician/utility" "strings" "testing" @@ -18,7 +19,7 @@ func TestChooseReviewers(t *testing.T) { "no previous review requests assigns new reviewer from team": { FirstRequestedReviewer: "", PreviouslyInvolvedReviewers: []string{}, - ExpectReviewersFromList: removes(reviewerRotation, onVacationReviewers), + ExpectReviewersFromList: 
utils.Removes(reviewerRotation, onVacationReviewers), ExpectPrimaryReviewer: true, }, "first requested reviewer means that primary reviewer was already selected": { @@ -35,7 +36,7 @@ func TestChooseReviewers(t *testing.T) { "previously involved reviewers that are not team members are ignored": { FirstRequestedReviewer: "", PreviouslyInvolvedReviewers: []string{"foobar"}, - ExpectReviewersFromList: removes(reviewerRotation, onVacationReviewers), + ExpectReviewersFromList: utils.Removes(reviewerRotation, onVacationReviewers), ExpectPrimaryReviewer: true, }, "only previously involved team member reviewers will have review requested": { @@ -56,7 +57,7 @@ func TestChooseReviewers(t *testing.T) { tc := tc t.Run(tn, func(t *testing.T) { t.Parallel() - reviewers, primaryReviewer := chooseReviewers(tc.FirstRequestedReviewer, tc.PreviouslyInvolvedReviewers) + reviewers, primaryReviewer := ChooseReviewers(tc.FirstRequestedReviewer, tc.PreviouslyInvolvedReviewers) if tc.ExpectPrimaryReviewer && primaryReviewer == "" { t.Error("wanted primary reviewer to be returned; got none") } @@ -82,22 +83,22 @@ func TestChooseReviewers(t *testing.T) { func TestFormatReviewerComment(t *testing.T) { cases := map[string]struct { Reviewer string - AuthorUserType userType + AuthorUserType UserType Trusted bool }{ "community contributor": { Reviewer: "foobar", - AuthorUserType: communityUserType, + AuthorUserType: CommunityUserType, Trusted: false, }, "googler": { Reviewer: "foobar", - AuthorUserType: googlerUserType, + AuthorUserType: GooglerUserType, Trusted: true, }, "core contributor": { Reviewer: "foobar", - AuthorUserType: coreContributorUserType, + AuthorUserType: CoreContributorUserType, Trusted: true, }, } @@ -106,7 +107,7 @@ func TestFormatReviewerComment(t *testing.T) { tc := tc t.Run(tn, func(t *testing.T) { t.Parallel() - comment := formatReviewerComment(tc.Reviewer, tc.AuthorUserType, tc.Trusted) + comment := FormatReviewerComment(tc.Reviewer, tc.AuthorUserType, tc.Trusted) 
t.Log(comment) if !strings.Contains(comment, fmt.Sprintf("@%s", tc.Reviewer)) { t.Errorf("wanted comment to contain @%s; does not.", tc.Reviewer) @@ -117,7 +118,7 @@ func TestFormatReviewerComment(t *testing.T) { if strings.Contains(comment, fmt.Sprintf("~%s~", tc.AuthorUserType.String())) { t.Errorf("wanted user type (%s) in comment to not be crossed out, but it is", tc.AuthorUserType.String()) } - for _, ut := range []userType{communityUserType, googlerUserType, coreContributorUserType} { + for _, ut := range []UserType{CommunityUserType, GooglerUserType, CoreContributorUserType} { if ut != tc.AuthorUserType && !strings.Contains(comment, fmt.Sprintf("~%s~", ut.String())) { t.Errorf("wanted other user type (%s) in comment to be crossed out, but it is not", ut) } diff --git a/.ci/magician/github/set.go b/.ci/magician/github/set.go new file mode 100644 index 000000000000..90f116f673fb --- /dev/null +++ b/.ci/magician/github/set.go @@ -0,0 +1,116 @@ +package github + +import ( + "fmt" + utils "magician/utility" + "net/http" +) + +func (gh *github) PostBuildStatus(prNumber, title, state, targetURL, commitSha string) error { + url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/%s", commitSha) + + postBody := map[string]string{ + "context": title, + "state": state, + "target_url": targetURL, + } + + _, err := utils.RequestCall(url, "POST", gh.token, nil, postBody) + if err != nil { + return err + } + + fmt.Printf("Successfully posted build status to pull request %s\n", prNumber) + + return nil +} + +func (gh *github) PostComment(prNumber, comment string) error { + url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/comments", prNumber) + + body := map[string]string{ + "body": comment, + } + + reqStatusCode, err := utils.RequestCall(url, "POST", gh.token, nil, body) + if err != nil { + return err + } + + if reqStatusCode != http.StatusCreated { + return fmt.Errorf("error posting comment for 
PR %s", prNumber) + } + + fmt.Printf("Successfully posted comment to pull request %s\n", prNumber) + + return nil +} + +func (gh *github) RequestPullRequestReviewer(prNumber, assignee string) error { + url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/requested_reviewers", prNumber) + + body := map[string][]string{ + "reviewers": {assignee}, + "team_reviewers": {}, + } + + reqStatusCode, err := utils.RequestCall(url, "POST", gh.token, nil, body) + if err != nil { + return err + } + + if reqStatusCode != http.StatusCreated { + return fmt.Errorf("error adding reviewer for PR %s", prNumber) + } + + fmt.Printf("Successfully added reviewer %s to pull request %s\n", assignee, prNumber) + + return nil +} + +func (gh *github) AddLabel(prNumber, label string) error { + url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/labels", prNumber) + + body := map[string][]string{ + "labels": {label}, + } + _, err := utils.RequestCall(url, "POST", gh.token, nil, body) + + if err != nil { + return fmt.Errorf("failed to add %s label: %s", label, err) + } + + return nil + +} + +func (gh *github) RemoveLabel(prNumber, label string) error { + url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/labels/%s", prNumber, label) + _, err := utils.RequestCall(url, "DELETE", gh.token, nil, nil) + + if err != nil { + return fmt.Errorf("failed to remove %s label: %s", label, err) + } + + return nil +} + +func (gh *github) CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error { + url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/actions/workflows/%s/dispatches", workflowFileName) + resp, err := utils.RequestCall(url, "POST", gh.token, nil, map[string]any{ + "ref": "main", + "inputs": inputs, + }) + + if resp != 200 && resp != 204 { + return fmt.Errorf("server returned %d creating workflow dispatch event", resp) + } + + 
if err != nil { + return fmt.Errorf("failed to create workflow dispatch event: %s", err) + } + + fmt.Printf("Successfully created workflow dispatch event for %s with inputs %v", workflowFileName, inputs) + + return nil +} diff --git a/.ci/containers/membership-checker/go.mod b/.ci/magician/go.mod similarity index 66% rename from .ci/containers/membership-checker/go.mod rename to .ci/magician/go.mod index 903d7b9a2c54..4ebd7498dd30 100644 --- a/.ci/containers/membership-checker/go.mod +++ b/.ci/magician/go.mod @@ -1,25 +1,30 @@ -module membership-checker +module magician -go 1.20 +go 1.19 require ( + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/spf13/cobra v1.7.0 + github.com/spf13/pflag v1.0.5 // indirect golang.org/x/exp v0.0.0-20230314191032-db074128a8ec google.golang.org/api v0.112.0 ) +require github.com/otiai10/copy v1.12.0 + require ( cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/net v0.8.0 // indirect + golang.org/x/net v0.15.0 // indirect golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488 // indirect google.golang.org/grpc v1.53.0 // indirect diff --git a/.ci/containers/membership-checker/go.sum b/.ci/magician/go.sum similarity index 88% rename from .ci/containers/membership-checker/go.sum rename to .ci/magician/go.sum 
index da929d371259..a7ac8b6878b0 100644 --- a/.ci/containers/membership-checker/go.sum +++ b/.ci/magician/go.sum @@ -9,6 +9,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -16,8 +17,9 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -47,8 +49,18 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9 github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/otiai10/copy v1.12.0 h1:cLMgSQnXBs1eehF0Wy/FAGsgDTDmAqFR7rQylBb1nDY= +github.com/otiai10/copy v1.12.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -72,8 +84,8 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= @@ -84,13 +96,13 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= 
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= diff --git a/.ci/magician/main.go b/.ci/magician/main.go new file mode 100644 index 000000000000..7cf107ae4fbc --- /dev/null +++ b/.ci/magician/main.go @@ -0,0 +1,11 @@ +/* +Copyright © 2023 NAME HERE + +*/ +package main + +import "magician/cmd" + +func main() { + cmd.Execute() +} diff --git a/.ci/magician/utility/utils.go b/.ci/magician/utility/utils.go new file mode 100644 index 000000000000..6bc778f97e3f --- /dev/null +++ b/.ci/magician/utility/utils.go @@ -0,0 +1,64 @@ +package utility + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + + "golang.org/x/exp/slices" +) + +func RequestCall(url, method, credentials string, result any, body any) (int, error) { + client := &http.Client{} + jsonBody, err := json.Marshal(body) + if err != nil { + return 1, fmt.Errorf("error marshaling JSON: %s", err) + } + req, err := http.NewRequest(method, url, bytes.NewBuffer(jsonBody)) + if err != nil { + return 2, fmt.Errorf("error creating request: %s", err) + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", credentials)) + req.Header.Set("Content-Type", "application/json") + fmt.Println("") + fmt.Println("request url: ", url) + fmt.Println("request body: ", string(jsonBody)) // Convert to string + fmt.Println("") + + resp, err := client.Do(req) + if err != nil { + return 3, err + } + defer resp.Body.Close() + + respBodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return 5, err + } + + fmt.Println("response status-code: ", resp.StatusCode) + fmt.Println("response body: ", string(respBodyBytes)) // Convert to string + fmt.Println("") + + 
// Decode the response, if needed + if result != nil { + if err = json.Unmarshal(respBodyBytes, &result); err != nil { + return 4, err + } + } + + return resp.StatusCode, nil +} + +func Removes(s1 []string, s2 []string) []string { + result := make([]string, 0, len(s1)) + + for _, v := range s1 { + if !slices.Contains(s2, v) { + result = append(result, v) + } + } + return result +} diff --git a/.ci/containers/membership-checker/utils_test.go b/.ci/magician/utility/utils_test.go similarity index 95% rename from .ci/containers/membership-checker/utils_test.go rename to .ci/magician/utility/utils_test.go index adea48f933cb..2f82538ab7c1 100644 --- a/.ci/containers/membership-checker/utils_test.go +++ b/.ci/magician/utility/utils_test.go @@ -1,4 +1,4 @@ -package main +package utility import ( "reflect" @@ -41,7 +41,7 @@ func TestRemovesList(t *testing.T) { }, } for tn, tc := range cases { - result := removes(tc.Original, tc.Removal) + result := Removes(tc.Original, tc.Removal) if !reflect.DeepEqual(result, tc.Expected) { t.Errorf("bad: %s, '%s' removes '%s' expect result: %s, but got: %s", tn, tc.Original, tc.Removal, tc.Expected, result) } diff --git a/.ci/scripts/bash-plus/downstream-waiter/wait_for_commit.sh b/.ci/scripts/bash-plus/downstream-waiter/wait_for_commit.sh index f9e8315760cd..81baf6e724b2 100755 --- a/.ci/scripts/bash-plus/downstream-waiter/wait_for_commit.sh +++ b/.ci/scripts/bash-plus/downstream-waiter/wait_for_commit.sh @@ -26,9 +26,13 @@ fi while true; do if [ "$BASE_BRANCH" != "main" ]; then SYNC_HEAD="$(git rev-parse --short origin/$SYNC_BRANCH)" - BASE_PARENT="$(git rev-parse --short origin/$BASE_BRANCH~)" + BASE_PARENT="$(git rev-parse --short $SHA~)" if [ "$SYNC_HEAD" == "$BASE_PARENT" ]; then break; + else + echo "sync branch is at: $SYNC_HEAD" + echo "current commit is $SHA" + git fetch origin $SYNC_BRANCH fi else commits="$(git log --pretty=%H origin/$SYNC_BRANCH..origin/$BASE_BRANCH | tail -n 1)" diff --git 
a/.ci/scripts/bash-plus/pre-build-validator/validate.sh b/.ci/scripts/bash-plus/pre-build-validator/validate.sh deleted file mode 100755 index 2b9463ba4c00..000000000000 --- a/.ci/scripts/bash-plus/pre-build-validator/validate.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# remove after 9/2023 - -set -e - -gh_repo=magic-modules - -post_body=$(jq -n \ - --arg owner "GoogleCloudPlatform" \ - --arg repo "$gh_repo" \ - --arg sha "$COMMIT_SHA" \ - '{ - ref: "main", - inputs: { - owner: $owner, - repo: $repo, - sha: $sha, - } - }') - -curl \ - -X POST \ - -u "modular-magician:$GITHUB_TOKEN" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/GoogleCloudPlatform/magic-modules/actions/workflows/pre-build-validation.yml/dispatches" \ - -d "$post_body" diff --git a/.ci/scripts/build-environment/downstream-builder/generate_downstream.sh b/.ci/scripts/build-environment/downstream-builder/generate_downstream.sh index f1e054aa2558..88ac64f5be87 100755 --- a/.ci/scripts/build-environment/downstream-builder/generate_downstream.sh +++ b/.ci/scripts/build-environment/downstream-builder/generate_downstream.sh @@ -1,6 +1,7 @@ #! /bin/bash set -e +NEWLINE=$'\n' function clone_repo() { SCRATCH_OWNER=modular-magician @@ -96,17 +97,19 @@ elif [ "$COMMAND" == "base" ]; then COMMIT_MESSAGE="Old generated code for MM PR $REFERENCE." elif [ "$COMMAND" == "downstream" ]; then BRANCH=downstream-pr-$REFERENCE - COMMIT_MESSAGE="$(git log -1 --pretty=%B "$REFERENCE")" + ORIGINAL_MESSAGE="$(git log -1 --pretty=%B "$REFERENCE")" + COMMIT_MESSAGE="$ORIGINAL_MESSAGE$NEWLINE[upstream:$REFERENCE]" fi if [ "$REPO" == "terraform" ]; then pushd $LOCAL_PATH go mod download - find . 
-type f -not -wholename "./.git*" -not -wholename "./.changelog*" -not -name ".travis.yml" -not -name ".golangci.yml" -not -name "CHANGELOG.md" -not -name "GNUmakefile" -not -name "docscheck.sh" -not -name "LICENSE" -not -name "README.md" -not -wholename "./examples*" -not -name ".go-version" -not -name ".hashibot.hcl" -print0 | xargs -0 git rm + find . -type f -not -wholename "./.git*" -not -wholename "./.changelog*" -not -name ".travis.yml" -not -name ".golangci.yml" -not -name "CHANGELOG.md" -not -name "CHANGELOG_v*.md" -not -name "GNUmakefile" -not -name "docscheck.sh" -not -name "LICENSE" -not -name "README.md" -not -wholename "./examples*" -not -name ".go-version" -not -name ".hashibot.hcl" -print0 | xargs -0 git rm popd fi if [ "$REPO" == "terraform-google-conversion" ]; then + # Generate tfplan2cai pushd $LOCAL_PATH # clear out the templates as they are copied during # generation from mmv1/third_party/validator/tests/data @@ -119,6 +122,13 @@ if [ "$REPO" == "terraform-google-conversion" ]; then bundle exec compiler.rb -a -e terraform -f validator -o $LOCAL_PATH/tfplan2cai -v $VERSION + # Generate cai2hcl + pushd $LOCAL_PATH + rm -rf ./cai2hcl/* + popd + + bundle exec compiler.rb -a -e terraform -f tgc_cai2hcl -o $LOCAL_PATH/cai2hcl -v $VERSION + if [ "$COMMAND" == "downstream" ]; then pushd $LOCAL_PATH go get -d github.com/hashicorp/terraform-provider-google-beta@$BASE_BRANCH diff --git a/.ci/scripts/go-plus/gcb-terraform-vcr-tester/test_terraform_vcr.sh b/.ci/scripts/go-plus/gcb-terraform-vcr-tester/test_terraform_vcr.sh index 1cfa480e6409..23975f99c82e 100755 --- a/.ci/scripts/go-plus/gcb-terraform-vcr-tester/test_terraform_vcr.sh +++ b/.ci/scripts/go-plus/gcb-terraform-vcr-tester/test_terraform_vcr.sh @@ -13,6 +13,7 @@ gh_repo=terraform-provider-google-beta NEWLINE=$'\n' new_branch="auto-pr-$pr_number" +old_branch="auto-pr-$pr_number-old" git_remote=https://github.com/$github_username/$gh_repo local_path=$GOPATH/src/github.com/hashicorp/$gh_repo mkdir 
-p "$(dirname $local_path)" @@ -21,9 +22,13 @@ pushd $local_path # Only skip tests if we can tell for sure that no go files were changed echo "Checking for modified go files" +# Fetch the latest commit in the old branch, associating them locally +# This will let us compare the old and new branch by name on the next line +git fetch origin $old_branch:$old_branch --depth 1 # get the names of changed files and look for go files # (ignoring "no matches found" errors from grep) -gofiles=$(git diff --name-only HEAD~1 | { grep -e "\.go$" -e "go.mod$" -e "go.sum$" || test $? = 1; }) +# If there was no code generated, this will always return nothing (because there's no diff) +gofiles=$(git diff $new_branch $old_branch --name-only | { grep -e "\.go$" -e "go.mod$" -e "go.sum$" || test $? = 1; }) if [[ -z $gofiles ]]; then echo "Skipping tests: No go files changed" exit 0 @@ -324,4 +329,4 @@ fi set -e -update_status ${test_state} +update_status ${test_state} \ No newline at end of file diff --git a/.ci/scripts/go-plus/github-differ/generate_comment.sh b/.ci/scripts/go-plus/github-differ/generate_comment.sh index 455d6fa63aa6..191dde497fb4 100755 --- a/.ci/scripts/go-plus/github-differ/generate_comment.sh +++ b/.ci/scripts/go-plus/github-differ/generate_comment.sh @@ -57,12 +57,14 @@ if ! git diff --exit-code origin/$OLD_BRANCH origin/$NEW_BRANCH; then fi popd -## Breaking change setup and execution +## Diff processor - TPG set +e pushd $MM_LOCAL_PATH/tools/diff-processor cp -r $TPG_LOCAL_PATH old/ cp -r $TPG_LOCAL_PATH new/ make build OLD_REF=$OLD_BRANCH NEW_REF=$NEW_BRANCH + +### Breaking changes TPG_BREAKING="$(bin/diff-processor breaking-changes)" retVal=$? 
if [ $retVal -ne 0 ]; then @@ -70,16 +72,35 @@ if [ $retVal -ne 0 ]; then BREAKING_CHANGE_BUILD_FAILURE=1 fi +### Add labels +GITHUB_TOKEN=$GITHUB_TOKEN $MM_LOCAL_PATH/tools/diff-processor/bin/diff-processor add-labels $PR_NUMBER rm -rf ./old/ ./new/ ./bin/ +popd +set -e + +## Diff processor - TPGB +set +e +pushd $MM_LOCAL_PATH/tools/diff-processor cp -r $TPGB_LOCAL_PATH old/ cp -r $TPGB_LOCAL_PATH new/ make build OLD_REF=$OLD_BRANCH NEW_REF=$NEW_BRANCH + +### Breaking changes TPGB_BREAKING="$(bin/diff-processor breaking-changes)" retVal=$? if [ $retVal -ne 0 ]; then TPGB_BREAKING="" BREAKING_CHANGE_BUILD_FAILURE=1 fi + +### Add labels +GITHUB_TOKEN=$GITHUB_TOKEN $MM_LOCAL_PATH/tools/diff-processor/bin/diff-processor add-labels $PR_NUMBER +rm -rf ./old/ ./new/ ./bin/ +popd +set -e + +## Report breaking change failures +set +e if [ $BREAKING_CHANGE_BUILD_FAILURE -eq 0 ]; then echo "Breaking changes succeeded" # Export variables here so that they can be used in compare_breaking_changes @@ -91,7 +112,6 @@ else echo "Breaking changes failed" BREAKINGCHANGES="## Breaking Change Detection Failed${NEWLINE}The breaking change detector crashed during execution. This is usually due to the downstream provider(s) failing to compile. Please investigate or follow up with your reviewer." fi -popd set -e ## Missing test setup and execution diff --git a/.ci/scripts/go-plus/magician/exec.sh b/.ci/scripts/go-plus/magician/exec.sh new file mode 100755 index 000000000000..acd9714f09bd --- /dev/null +++ b/.ci/scripts/go-plus/magician/exec.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Get the directory of the current script +DIR="$(dirname $(realpath $0))" + +# Construct the path to the Go program directory and binary +GO_PROGRAM_DIR="$DIR/../../../magician/" +GO_BINARY="$GO_PROGRAM_DIR/magician_binary" + +pushd $GO_PROGRAM_DIR + +set -x +# Check if the binary exists +if [ ! 
-f "$GO_BINARY" ]; then + # If it doesn't exist, compile the binary + echo "Building the magician binary at $GO_BINARY" + go build -o "$GO_BINARY" +fi + +# If there are no arguments only compile the binary +if [ "$#" -eq 0 ]; then + echo "No arguments provided" + exit 0 +fi + +# Run the binary and pass all arguments +$GO_BINARY "$@" +set +x diff --git a/.ci/scripts/go-plus/tgc-tester/test_tgc.sh b/.ci/scripts/go-plus/tgc-tester/test_tgc.sh index d4023a2d79d4..10a7dcebe01f 100755 --- a/.ci/scripts/go-plus/tgc-tester/test_tgc.sh +++ b/.ci/scripts/go-plus/tgc-tester/test_tgc.sh @@ -2,71 +2,30 @@ set -e -pr_number=$1 -mm_commit_sha=$2 -build_id=$3 -project_id=$4 -build_step=$5 -gh_repo=$6 +pr_number=${PR_NUMBER:-$1} +mm_commit_sha=${COMMIT_SHA:-$2} +gh_repo=terraform-google-conversion github_username=modular-magician - new_branch="auto-pr-$pr_number" -git_remote=https://$github_username:$GITHUB_TOKEN@github.com/$github_username/$gh_repo -local_path=$GOPATH/src/github.com/GoogleCloudPlatform/$gh_repo -mkdir -p "$(dirname $local_path)" -git clone $git_remote $local_path --branch $new_branch --depth 2 -pushd $local_path - -# Only skip tests if we can tell for sure that no go files were changed -echo "Checking for modified go files" -# get the names of changed files and look for go files -# (ignoring "no matches found" errors from grep) -gofiles=$(git diff --name-only HEAD~1 | { grep "\.go$" || test $? 
= 1; }) -if [[ -z $gofiles ]]; then - echo "Skipping tests: No go files changed" - exit 0 -else - echo "Running tests: Go files changed" -fi - -post_body=$( jq -n \ - --arg context "${gh_repo}-test" \ - --arg target_url "https://console.cloud.google.com/cloud-build/builds;region=global/${build_id};step=${build_step}?project=${project_id}" \ - --arg state "pending" \ - '{context: $context, target_url: $target_url, state: $state}') - -curl \ - -X POST \ - -u "$github_username:$GITHUB_TOKEN" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/$mm_commit_sha" \ - -d "$post_body" - -set +e - -go mod edit -replace github.com/hashicorp/terraform-provider-google-beta=github.com/$github_username/terraform-provider-google-beta@$new_branch -go mod tidy - -make test -exit_code=$? - -set -e - -if [ $exit_code -ne 0 ]; then - state="failure" -else - state="success" -fi -post_body=$( jq -n \ - --arg context "${gh_repo}-test" \ - --arg target_url "https://console.cloud.google.com/cloud-build/builds;region=global/${build_id};step=${build_step}?project=${project_id}" \ - --arg state "${state}" \ - '{context: $context, target_url: $target_url, state: $state}') +post_body=$(jq -n \ + --arg owner "$github_username" \ + --arg branch "$new_branch" \ + --arg repo "$gh_repo" \ + --arg sha "$mm_commit_sha" \ + '{ + ref: "tgc-units", + inputs: { + owner: $owner, + repo: $repo, + branch: $branch, + sha: $sha + } + }') curl \ -X POST \ -u "$github_username:$GITHUB_TOKEN" \ -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/$mm_commit_sha" \ + "https://api.github.com/repos/GoogleCloudPlatform/magic-modules/actions/workflows/test-tgc.yml/dispatches" \ -d "$post_body" diff --git a/.github/workflows/magic-modules.yml b/.github/workflows/magic-modules.yml index a6596c6ce5c1..1a2c88510fa2 100644 --- a/.github/workflows/magic-modules.yml +++ 
b/.github/workflows/magic-modules.yml @@ -39,6 +39,15 @@ jobs: uses: actions/checkout@v3 with: path: repo + fetch-depth: 2 + - name: Merge base branch + id: pull_request + run: | + cd repo + git config user.name "modular-magician" + git config user.email "magic-modules@google.com" + git fetch origin ${{ github.base_ref }} # Fetch the base branch + git merge --no-ff origin/${{ github.base_ref }} # Merge with the base branch - name: Set up Ruby uses: ruby/setup-ruby@ec02537da5712d66d4d50a0f33b7eb52773b5ed1 with: diff --git a/.github/workflows/membership-checker.yml b/.github/workflows/membership-checker.yml index a0c9b392599a..fbc1d10bba62 100644 --- a/.github/workflows/membership-checker.yml +++ b/.github/workflows/membership-checker.yml @@ -5,7 +5,7 @@ permissions: read-all on: pull_request: paths: - - '.ci/containers/membership-checker/**' + - '.ci/magician/**' jobs: build-and-unit-tests: @@ -18,5 +18,5 @@ jobs: go-version: '^1.19.1' - name: Run membership checker unit tests run: | - cd .ci/containers/membership-checker - go test -v + cd .ci/magician + go test ./... 
-v diff --git a/.github/workflows/pre-build-validation.yml b/.github/workflows/pre-build-validation.yml deleted file mode 100644 index 2ce026431ea6..000000000000 --- a/.github/workflows/pre-build-validation.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: Pre-Build Validation - -permissions: - actions: read - contents: read - statuses: write - -env: - status_suffix: "-pre-build-validation" - -on: - workflow_dispatch: - inputs: - owner: - description: 'The owner of the fork' - required: false - default: 'modular-magician' - repo: - description: 'The Base Repository to pull from' - required: false - default: 'magic-modules' - sha: - description: "The commit SHA in magic-modules repository to execute against and where the status result will be posted" - required: true - -jobs: - pre-build-validation: - runs-on: ubuntu-latest - steps: - - name: Checkout Repository - uses: actions/checkout@v3 - with: - repository: ${{ github.event.inputs.owner }}/${{ github.event.inputs.repo }} - ref: ${{ github.event.inputs.sha }} - path: repo - fetch-depth: 0 - - name: Check for mmv1 product file changes - id: pull_request - run: | - cd repo - git config user.name "modular-magician" - git config user.email "magic-modules@google.com" - git merge --no-ff origin/main - yamlfiles=$(git diff --name-only origin/main -- mmv1/products) - if [ ! 
-z "$yamlfiles" ]; then - echo "yamlfiles=repo/${yamlfiles//$'\n'/ repo/}" >> $GITHUB_OUTPUT - fi - - name: Get Job URL - if: ${{ !failure() && steps.pull_request.outputs.yamlfiles != '' }} - id: get_job - run: | - response=$(curl --get -Ss -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }}/jobs") - html_url=$(echo "$response" | jq -r --arg job_name "${{ github.job }}" '.jobs | map(select(.name == $job_name)) | .[0].html_url') - echo "url=${html_url}" >> $GITHUB_OUTPUT - - name: Install yamllint - if: ${{ !failure() && steps.pull_request.outputs.yamlfiles != '' }} - run: pip install yamllint - - name: Lint YAML files - if: ${{ !failure() && steps.pull_request.outputs.yamlfiles != '' }} - run: yamllint -c repo/.yamllint ${{steps.pull_request.outputs.yamlfiles}} - - name: Post Result Status to Pull Request - if: ${{ !cancelled() && steps.pull_request.outputs.yamlfiles != '' }} - run: | - curl -X POST -H "Authorization: token ${{secrets.GITHUB_TOKEN}}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/${{github.event.inputs.sha}}" \ - -d '{ - "context": "${{ github.event.inputs.repo }}${{ env.status_suffix }}", - "target_url": "${{ steps.get_job.outputs.url }}", - "state": "${{ job.status }}" - }' diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 000000000000..cd6776998c7c --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,72 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. 
See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '33 3 * * 0' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. 
+ - name: "Upload artifact" + uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + with: + sarif_file: results.sarif diff --git a/.github/workflows/test-tgc.yml b/.github/workflows/test-tgc.yml index 7c8c8f7376de..e1654f092c1e 100644 --- a/.github/workflows/test-tgc.yml +++ b/.github/workflows/test-tgc.yml @@ -40,12 +40,19 @@ jobs: with: repository: ${{ github.event.inputs.owner }}/${{ github.event.inputs.repo }} ref: ${{ github.event.inputs.branch }} - path: tgc fetch-depth: 2 + - name: Cache Go modules and build cache + uses: actions/cache@v3 + with: + path: | + ~/go/pkg/mod + key: ${{ runner.os }}-test-${{ github.event.inputs.repo }}-${{ hashFiles('go.sum') }} + restore-keys: | + ${{ runner.os }}-test-${{ github.event.inputs.repo }}-${{ hashFiles('go.sum') }} + ${{ runner.os }}-test-${{ github.event.inputs.repo }}- - name: Check for Code Changes id: pull_request run: | - cd tgc gofiles=$(git diff --name-only HEAD~1 | { grep -e "\.go$" -e "go.mod$" -e "go.sum$" || test $? 
= 1; }) if [ -z "$gofiles" ]; then echo "has_changes=false" >> $GITHUB_OUTPUT @@ -72,18 +79,18 @@ jobs: }' - name: Set up Go if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} - uses: actions/setup-go@v4 + uses: actions/setup-go@v3 with: - go-version: '^1.19.9' + go-version: '^1.19' - name: Build Terraform Google Conversion if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd tgc - go build + go mod edit -replace github.com/hashicorp/terraform-provider-google-beta=github.com/${{ github.event.inputs.owner }}/terraform-provider-google-beta@${{ github.event.inputs.branch }} + go mod tidy + make build - name: Run Unit Tests if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd tgc make test - name: Post Result Status to Pull Request if: ${{ !cancelled() }} diff --git a/.github/workflows/test-tpg.yml b/.github/workflows/test-tpg.yml index 698785a415d2..de61efccb631 100644 --- a/.github/workflows/test-tpg.yml +++ b/.github/workflows/test-tpg.yml @@ -40,12 +40,20 @@ jobs: with: repository: ${{ github.event.inputs.owner }}/${{ github.event.inputs.repo }} ref: ${{ github.event.inputs.branch }} - path: provider fetch-depth: 2 + - name: Cache Go modules and build cache + uses: actions/cache@v3 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: ${{ runner.os }}-test-${{ github.event.inputs.repo }}-${{hashFiles('go.sum','google-*/transport/**','google-*/tpgresource/**','google-*/acctest/**','google-*/envvar/**','google-*/sweeper/**','google-*/verify/**') }} + restore-keys: | + ${{ runner.os }}-test-${{ github.event.inputs.repo }}-${{ hashFiles('go.sum') }} + ${{ runner.os }}-test-${{ github.event.inputs.repo }}- - name: Check for Code Changes id: pull_request run: | - cd provider gofiles=$(git diff --name-only HEAD~1 | { grep -e "\.go$" -e "go.mod$" -e "go.sum$" || test $? 
= 1; }) if [ -z "$gofiles" ]; then echo "has_changes=false" >> $GITHUB_OUTPUT @@ -72,28 +80,24 @@ jobs: }' - name: Set up Go if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} - uses: actions/setup-go@v4 + uses: actions/setup-go@v3 with: - go-version: '^1.19.1' + go-version: '^1.19' - name: Build Provider if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd provider go build - name: Run Unit Tests if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd provider - make test + make testnolint TESTARGS="-p 4" - name: Lint Check if: ${{ !cancelled() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd provider make lint - name: Documentation Check if: ${{ !cancelled() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd provider make docscheck - name: Post Result Status to Pull Request if: ${{ !cancelled() }} diff --git a/README.md b/README.md index 7004dbf67973..1035baf6e4a7 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ # Magic Modules Magic Modules is a code generator and CI system that's used to develop the Terraform providers -for Google Platform, [`google`](https://github.com/hashicorp/terraform-provider-google) (or TPG) and +for Google Cloud Platform, [`google`](https://github.com/hashicorp/terraform-provider-google) (or TPG) and [`google-beta`](https://github.com/hashicorp/terraform-provider-google-beta) (or TPGB). 
Magic Modules allows contributors to make changes against a single codebase and develop both diff --git a/docs/content/best-practices/_index.md b/docs/content/best-practices/_index.md index 192af1b155a4..81eebff07ebb 100644 --- a/docs/content/best-practices/_index.md +++ b/docs/content/best-practices/_index.md @@ -23,9 +23,10 @@ In complex cases, it is better to mark the field `ForceNew` to ensure that users Some resources, such as databases, have a significant risk of unrecoverable data loss if the resource is accidentally deleted due to a change to a ForceNew field. For these resources, the best practice is to add a `deletion_protection` field that defaults to `true`, which prevents the resource from being deleted if enabled. Although it is a small breaking change, for users, the benefits of `deletion_protection` defaulting to `true` outweigh the cost. -APIs also sometimes add `deletion_protection` fields, which will generally default to `false` for backwards-compatibility reasons. Any `deletion_protection` API field added to an existing Terraform resource must match the API default initially. The default may be set to `true` in the next major release. For new Terraform resources, any `deletion_protection` field should default to `true` in Terraform regardless of the API default. +APIs also sometimes add `deletion_protection` fields, which will generally default to `false` for backwards-compatibility reasons. Any `deletion_protection` API field added to an existing Terraform resource must match the API default initially. The default may be set to `true` in the next major release. For new Terraform resources, any `deletion_protection` field should default to `true` in Terraform regardless of the API default. When creating the corresponding Terraform field, the name +should match the API field name (i.e. 
it need not literally be named `deletion_protection` if the API uses something different) and should be the same field type (example: if the API field is an enum, so should the Terraform field). -A resource can have up to two `deletion_protection` fields (with different names): one that represents a field in the API, and one that is only in Terraform. This could happen because the API added its field after `deletion_protection` already existed in Terraform; it could also happen because a separate field was added in Terraform to make sure that `deletion_protection` is enabled by default. In either case, they should be reconciled into a single field (that defaults to `true`) in the next major release. +A resource can have up to two `deletion_protection` fields (with different names): one that represents a field in the API, and one that is only in Terraform. This could happen because the API added its field after `deletion_protection` already existed in Terraform; it could also happen because a separate field was added in Terraform to make sure that `deletion_protection` is enabled by default. In either case, they should be reconciled into a single field (that defaults to enabled and whose name matches the API field) in the next major release. Resources that do not have a significant risk of unrecoverable data loss or similar critical concern will not be given `deletion_protection` fields. @@ -40,3 +41,152 @@ Some resources need to let users control the actions taken add deletion time. Fo One common example is `ABANDON`, which is useful if the resource is safe to delete from Terraform but could cause problems if deleted from the API - for example, `google_bigtable_gc_policy` deletion can fail in replicated instances. `ABANDON` indicates that attempts to delete the resource should remove it from state without actually deleting it. 
See [magic-modules#13107](https://github.com/hashicorp/terraform-provider-google/pull/13107) for an example of adding a `deletion_policy` field to an existing resource. + +## Add labels and annotations support + +The new labels model and the new annotations model are introduced in [Terraform Google Provider 5.0.0](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/version_5_upgrade#provider). + +There are now three label-related fields with the new labels model: +* The `labels` field is now non-authoritative and only manages the label keys defined in your configuration for the resource. +* The `terraform_labels` cannot be specified directly by the user. It merges the labels defined in the resource's configuration and the default labels configured in the provider block. If the same label key exists on both the resource level and provider level, the value on the resource will override the provider-level default. +* The output-only `effective_labels` will list all the labels present on the resource in GCP, including the labels configured through Terraform, the system, and other clients. + +There are now two annotation-related fields with the new annotations model: +* The `annotations` field is now non-authoritative and only manages the annotation keys defined in your configuration for the resource. +* The output-only `effective_annotations` will list all the annotations present on the resource in GCP, including the annotations configured through Terraform, the system, and other clients. + +This document describes how to add `labels` and `annotations` field to resources to support the new models. + +### Labels support +When adding a new `labels` field, please make the changes below to support the new labels model. Otherwise, it has to wait for the next major release to make the changes. + +#### MMv1 resources + +1. Use the type `KeyValueLabels` for the standard resource `labels` field. 
The standard resource `labels` field could be the top level `labels` field or the nested `labels` field inside the top level `metadata` field. Don't add `default_from_api: true` to this field or don't use this type for other `labels` fields in the resource. `KeyValueLabels` will add all of the changes required for the new model automatically. + +```yaml +- !ruby/object:Api::Type::KeyValueLabels + name: 'labels' + description: | + The labels associated with this dataset. You can use these to + organize and group your datasets. +``` +2. In the handwritten acceptance tests, add `labels` and `terraform_labels` to `ImportStateVerifyIgnore` if `labels` field is in the configuration. + +```go +ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, +``` +3. In the corresponding data source, after the resource read method, call the function `tpgresource.SetDataSourceLabels(d)` to make `labels` and `terraform_labels` have all of the labels on the resource. + +```go +err = resourceArtifactRegistryRepositoryRead(d, meta) +if err != nil { + return err +} + +if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err +} +``` + +#### Handwritten resources + +1. Add `tpgresource.SetLabelsDiff` to `CustomizeDiff` of the resource. +```go +CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, +), +``` +2. Add `labels` field and add more attributes (such as `ForceNew: true,`, `Set: schema.HashString,`) to this field if necessary. +```go +"labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value label pairs to assign to the project. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field 'effective_labels' for all of the labels present on the resource.`, +}, +``` +3. Add output only field `terraform_labels` and add more attributes (such as `Set: schema.HashString,`) to this field if necessary.
Don't add `ForceNew:true,` to this field. +```go +"terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, +}, +``` +4. Add output only field `effective_labels` and add more attributes (such as `ForceNew: true,`, `Set: schema.HashString,`) to this field if necessary. +```go +"effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, +}, +``` +5. In the create method, use the value of `effective_labels` in API request. +6. In the update method, use the value of `effective_labels` in API request. +7. In the read method, set `labels`, `terraform_labels` and `effective_labels` to state. +```go +if err := tpgresource.SetLabels(res.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) +} +if err := tpgresource.SetLabels(res.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) +} +if err := d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) +} +``` +8. In the handwritten acceptance tests, add `labels` and `terraform_labels` to `ImportStateVerifyIgnore`. +9. In the corresponding data source, after the resource read method, call the function `tpgresource.SetDataSourceLabels(d)` to make `labels` and `terraform_labels` have all of the labels on the resource. +10. Add the documentation for these label-related fields. + +### Annotations support +When adding a new `annotations` field, please make the changes below to support the new annotations model. Otherwise, it has to wait for the next major release to make the breaking changes.
+ +#### MMv1 resources + +1. Use the type `KeyValueAnnotations` for the standard resource `annotations` field. The standard resource `annotations` field could be the top level `annotations` field or the nested `annotations` field inside the top level `metadata` field. Don't add `default_from_api: true` to this field or don't use this type for other `annotations` fields in the resource. `KeyValueAnnotations` will add all of the changes required for the new model automatically. + +```yaml +- !ruby/object:Api::Type::KeyValueAnnotations + name: 'annotations' + description: 'Client-specified annotations. This is distinct from labels.' +``` +2. In the handwritten acceptance tests, add `annotations` to `ImportStateVerifyIgnore` if `annotations` field is in the configuration. + +```go +ImportStateVerifyIgnore: []string{"annotations"}, +``` +3. In the corresponding data source, after the resource read method, call the function `tpgresource.SetDataSourceAnnotations(d)` to make `annotations` have all of the annotations on the resource. + +```go +err = resourceSecretManagerSecretRead(d, meta) +if err != nil { + return err +} + +if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err +} + +if err := tpgresource.SetDataSourceAnnotations(d); err != nil { + return err +} +``` + +#### Handwritten resources + +1. Add `tpgresource.SetAnnotationsDiff` to `CustomizeDiff` of the resource. +2. Add `annotations` field and add more attributes (such as `ForceNew: true,`, `Set: schema.HashString,`) to this field if necessary. +3. Add output only field `effective_annotations` and add more attributes (such as `ForceNew: true,`, `Set: schema.HashString,`) to this field if necessary. +4. In the create method, use the value of `effective_annotations` in API request. +5. In the update method, use the value of `effective_annotations` in API request. +6. In the read method, set `annotations`, and `effective_annotations` to state. +7.
In the handwritten acceptance tests, add `annotations` to `ImportStateVerifyIgnore`. +8. In the corresponding data source, after the resource read method, call the function `tpgresource.SetDataSourceAnnotations(d)` to make `annotations` have all of the annotations on the resource. +9. Add the documentation for these annotation-related fields. \ No newline at end of file diff --git a/docs/content/contribute/create-pr.md b/docs/content/contribute/create-pr.md index 1711e8207206..5530537fa4e5 100644 --- a/docs/content/contribute/create-pr.md +++ b/docs/content/contribute/create-pr.md @@ -18,13 +18,16 @@ weight: 10 1. A reviewer will automatically be assigned to your PR. 1. Creating a new pull request or pushing a new commit automatically triggers our CI pipelines and workflows. After CI starts, downstream diff generation takes about 10 minutes; VCR tests can take up to 2 hours. If you are a community contributor, some tests will only run after approval from a reviewer. + - While convenient, relying on CI to test iterative changes to PRs often adds extreme latency to reviews if there are errors in test configurations or at runtime. We **strongly** recommend you [test your changes locally before pushing]({{< ref "/develop/run-tests" >}}) even after the initial change. 1. If your assigned reviewer does not respond to changes on a pull request within two US business days, ping them on the pull request. {{< hint info >}} **TIP:** Speeding up review: +1. [Test your changes locally before pushing]({{< ref "/develop/run-tests" >}}) to iterate faster. + - You can push them and test in parallel as well. New CI runs will preempt old ones where possible. 1. Resolve failed [status checks](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/about-status-checks) quickly - - Ask your reviewer for help if you get stuck. -1.
[Self-review your PR]({{< ref "/contribute/review-pr" >}}") or ask someone you know to review + - Directly ask your reviewer for help if you don't know how to proceed. If there are failed checks they may only check in if there's no progress after a couple days. +1. [Self-review your PR]({{< ref "/contribute/review-pr" >}}) or ask someone else familiar with Terraform to review {{< /hint >}} diff --git a/docs/content/contribute/review-pr.md b/docs/content/contribute/review-pr.md index 57cec1fcaf0c..28cf589838e3 100644 --- a/docs/content/contribute/review-pr.md +++ b/docs/content/contribute/review-pr.md @@ -18,19 +18,21 @@ This page provides guidelines for reviewing a Magic Modules pull request (PR). * features only available in beta are not included in the GA google provider. * features added to the GA provider are also included in the beta provider -- beta should be a strict superset of GA. 1. no [breaking changes]({{< ref "/develop/make-a-breaking-change" >}}) are introduced without a valid justification. Add the `override-breaking-change` label if there is a valid justification. - 1. verify the change actually resolves the linked issues, if any. + * remember to check for changes in default behaviour like changing the flags on delete! + 1. verify the change **fully** resolves the linked issues, if any. If it does not, change the "Fixes" message to "Part of". 1. Check the tests added/modified to ensure that: 1. all fields added/updated in the PR appear in at least one test. * It is advisable to test updating from a non-zero value to a zero value if feasible. 1. all mutable fields are tested in at least one update test. 1. all resources in the acceptance tests have a `tf-test` or `tf_test` prefix in their primary id field. + 1. all handwritten test Config steps include import steps following them 1. all related tests pass in GA for features promoted from beta to GA. {{< hint info >}}Note: Presubmit VCR tests do not run in GA. 
Manual testing is required for promoted GA features. {{< /hint >}} 1. newly added or modified diff suppress functions are tested in at least one unit test. 1. the linked issue (if any) is covered by at least one test that reproduces the issue - * for example - a bugfix should test the bug (or explain why it's not feasible to do so in the description) and an enhancement should test the new behaviour(s). + * for example - a bugfix should test the bug (or explain why it's not feasible to do so in the description, including manual results when possible) and an enhancement should test the new behaviour(s). 1. all related PR presubmit tests have been completed successfully, including: * terraform-provider-breaking-change-test * presubmit-rake-tests @@ -40,6 +42,7 @@ This page provides guidelines for reviewing a Magic Modules pull request (PR). {{< hint info >}}Note: Some acceptance tests may be skipped in VCR and manual testing is required. {{< /hint >}} + 1. a significant number of preexisting tests have not been modified. Changing old tests often indicates a change is backwards incompatible. 1. Check documentation to ensure 1. resouce-level and field-level documentation are generated correctly for MMv1-based resource 1. documentation is added manually for handwritten resources. diff --git a/docs/content/develop/add-handwritten-datasource.md b/docs/content/develop/add-handwritten-datasource.md index 9c4038c3da44..76d75c1ffb9f 100644 --- a/docs/content/develop/add-handwritten-datasource.md +++ b/docs/content/develop/add-handwritten-datasource.md @@ -22,6 +22,8 @@ a new datasource there are 5 steps to doing so. 1. Create a new datasource declaration file and a corresponding test file 1. 
Add Schema and Read operation implementation + - If there is `labels` field with type `KeyValueLabels` in the corresponding resource, in the datasource Read operation implementation, after the resource read method, call the function `tpgresource.SetDataSourceLabels(d)` to make `labels` and `terraform_labels` have all of the labels on the resource. + - If there is `annotations` field with type `KeyValueAnnotations` in the corresponding resource, in the datasource Read operation implementation, after the resource read method, call the function `tpgresource.SetDataSourceAnnotations(d)` to make `annotations` have all of the annotations on the resource. 1. Add the datasource to the `provider.go.erb` index 1. Implement a test which will create and resources and read the corresponding datasource diff --git a/docs/content/develop/breaking-changes.md b/docs/content/develop/breaking-changes.md index 3afa8006093c..76bd2ba39a99 100644 --- a/docs/content/develop/breaking-changes.md +++ b/docs/content/develop/breaking-changes.md @@ -42,8 +42,13 @@ For more information, see the ID format will break the ability to parse the IDs from any deployments. * Removing or altering resource import ID formats * Automation written by end users may rely on specific import formats. -* Major changes to default resource behavior - * For example, changing the default implementation of resource deletion +* Changes to default resource behavior + * Changing resource deletion behavior + * In limited cases changes may be permissible if the prior behavior could **never** succeed. 
+ * Changing resource deletion to skip deleting the resource by default if delete was previously called + * Changing resource deletion to specify a force flag + * Adding a new field with a default different from the API default + * If an API default is expected to change- a breaking change for the API- use `default_from_api` which will avoid sending a value and safely take the server default in Terraform ## Field-level breaking changes diff --git a/docs/content/develop/custom-code.md b/docs/content/develop/custom-code.md index 81166d49b25d..df36e14142b1 100644 --- a/docs/content/develop/custom-code.md +++ b/docs/content/develop/custom-code.md @@ -1,6 +1,6 @@ --- title: "Add custom resource code" -weight: 31 +weight: 32 --- # Add custom resource code diff --git a/docs/content/develop/field-reference.md b/docs/content/develop/field-reference.md new file mode 100644 index 000000000000..0e4786459a71 --- /dev/null +++ b/docs/content/develop/field-reference.md @@ -0,0 +1,257 @@ +--- +title: "MMv1 field reference" +weight: 31 +aliases: + - /reference/field-reference +--- + +# MMv1 field reference + +This page documents commonly-used properties for fields. For a full list of +available properties, see [type.rb ↗](https://github.com/GoogleCloudPlatform/magic-modules/blob/main/mmv1/api/type.rb). + +## Shared properties + +### `min_version: beta` +Marks the field (and any subfields) as beta-only. Ensure a beta version block +is present in provider.yaml. Do not use if an ancestor field (or the overall +resource) is already marked as beta-only. + +### `immutable` +If true, the field (and any subfields) are considered immutable - that is, +only settable on create. If unset or false, the field is still considered +immutable if any ancestor field (or the overall resource) is immutable, +unless `update_url` is set. 
+ +Example: + +```yaml +immutable: true +``` + +### `update_url` +If set, changes to the field's value trigger a separate call to a specific +API method for updating the field's value. The field is not considered +immutable even if an ancestor field (or the overall resource) is immutable. +Terraform field names enclosed in double curly braces are replaced with the +field values from the resource at runtime. + +Example: + +```yaml +update_url: 'projects/{{project}}/locations/{{location}}/resourcenames/{{name}}/setFieldName' +``` + +### `update_verb` +If update_url is also set, overrides the verb used to update this specific +field. Allowed values: :POST, :PUT, :PATCH. Default: Resource's update_verb +(which defaults to :PUT if unset). + +Example: + +```yaml +update_verb: :POST +``` + +### `required` +If true, the field is required. If unset or false, the field is optional. + +Example: + +```yaml +required: true +``` + +### `output` +If true, the field is output-only - that is, it cannot be configured by the +user. If unset or false, the field is configurable. + +Example: + +```yaml +output: true +``` + +### `sensitive` +If true, the field is considered "sensitive", which means that its value will +be obscured in Terraform output such as plans. If false, the value will not be +obscured. Either way, the value will still be stored in plaintext in Terraform +state. See +[Handling Sensitive Values in State](https://developer.hashicorp.com/terraform/plugin/best-practices/sensitive-state) +for more information. + +Sensitive fields are often not returned by the API (because they are sensitive). +In this case, the field will also need to use [`ignore_read` or a `custom_flatten` function]({{< ref "/develop/field-reference#ignore_read" >}}). + +Example: + +```yaml +sensitive: true +``` + +### `ignore_read` +If true, the provider sets the field's value in the resource state based only +on the user's configuration. 
If false or unset, the provider sets the field's +value in the resource state based on the API response. Only use this attribute +if the field cannot be read from GCP due to either API or provider constraints. + +Nested fields currently +[do not support `ignore_read`](https://github.com/hashicorp/terraform-provider-google/issues/12410) +but can replicate the behavior by implementing a +[`custom_flatten`]({{< ref "/develop/custom-code#custom_flatten" >}}) +that always ignores the value returned by the API. [Example +](https://github.com/GoogleCloudPlatform/magic-modules/blob/5923d4cb878396a04bed9beaf22a8478e8b1e6a5/mmv1/templates/terraform/custom_flatten/source_representation_instance_configuration_password.go.erb). +Any fields using a custom flatten also need to be added to `ignore_read_extra` +for any examples where the field is set. + +Example: YAML + +```yaml +ignore_read: true +``` + +Example: Custom flatten + +```go +func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("password") +} +``` + +### `default_value` +Sets a client-side default value for the field. This should be used if the +API has a default value that applies in all cases and is stable. Removing +or changing a default value is a breaking change. If unset, the field defaults +to an "empty" value (such as zero, false, or an empty string). + +Example: + +```yaml +default_value: DEFAULT_VALUE +``` + +### `default_from_api` +If true, and the field is either not set or set to an "empty" value (such as +zero, false, or empty strings), the provider accepts any value returned from +the API as the value for the field. If false, and the field is either not set +or set to an "empty" value, the provider treats the field's `default_value` +as the value for the field and shows a diff if the API returns any other +value for the field. 
This attribute is useful for complex or +frequently-changed API-side defaults, but provides less useful information at +plan time than `default_value` and causes the provider to ignore user +configurations that explicitly set the field to an "empty" value. +`default_from_api` and `send_empty_value` cannot both be true on the same field. + +Example: + +```yaml +default_from_api: true +``` + +### `send_empty_value` +If true, the provider sends "empty" values (such as zero, false, or empty +strings) to the API if set explicitly in the user's configuration. If false, +"empty" values cause the field to be omitted entirely from the API request. +This attribute is useful for fields where the API would behave differently +for an "empty" value vs no value for a particular field - for example, +boolean fields that have an API-side default of true. +`send_empty_value` and `default_from_api` cannot both be true on the same field. + +Example: + +```yaml +send_empty_value: true +``` + +### `conflicts` +Specifies a list of fields (excluding the current field) that cannot be +specified at the same time as the current field. Must be set separately on +all listed fields. + +Example: + +```yaml +conflicts: + - field_one + - nested_object.0.nested_field +``` + +### `exactly_one_of` +Specifies a list of fields (including the current field) that cannot be +specified at the same time (but at least one of which must be set). Must be +set separately on all listed fields. + +Example: + +```yaml +exactly_one_of: + - field_one + - nested_object.0.nested_field +``` + +## `Enum` properties + +### `values` +Enum only. Sets allowed values as ruby "literal constants" (prefixed with a +colon). If the allowed values change frequently, use a String field instead +to allow better forwards-compatibility, and link to API documentation +stating the current allowed values in the String field's description. Do not +include UNSPECIFIED values in this list. 
+ +Example: + +```yaml +values: + - :VALUE_ONE + - :VALUE_TWO +``` + +## `Array` properties + +### `item_type` +Array only. Sets the expected type of the items in the array. Primitives +should use the name of the primitive class as a string; other types should +define the attributes of the nested type. + +Example: Primitive value + +```yaml +item_type: Api::Type::String +``` + +Example: Enum value + +```yaml +item_type: !ruby/object:Api::Type::Enum + name: 'required but unused' + description: 'required but unused' + values: + - :VALUE_ONE + - :VALUE_TWO +``` + +Example: Nested object + +```yaml +item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'FIELD_NAME' + description: | + MULTI_LINE_FIELD_DESCRIPTION +``` + +## `NestedObject` properties + +### `properties` +NestedObject only. Defines fields nested inside the current field. + +Example: + +```yaml +properties: + - !ruby/object:Api::Type::String + name: 'FIELD_NAME' + description: | + MULTI_LINE_FIELD_DESCRIPTION +``` diff --git a/docs/content/develop/resource.md b/docs/content/develop/resource.md index a2e605c71dad..2e0cd6afc1ec 100644 --- a/docs/content/develop/resource.md +++ b/docs/content/develop/resource.md @@ -220,122 +220,133 @@ For more information about types of resources and the generation process overall {{< tab "MMv1" >}} 1. For each API field, copy the following template into the resource's `properties` attribute. Be sure to indent appropriately. +{{< tabs "MMv1 types" >}} +{{< tab "Simple" >}} ```yaml -# Supported types: String, Integer, Boolean, Double, Enum, -# ResourceRef (link to a GCP resource), KeyValuePairs (string -> string map), -# Array, and NestedObject - !ruby/object:Api::Type::String name: 'API_FIELD_NAME' description: | MULTILINE_FIELD_DESCRIPTION - # Marks the field (and any subfields) as beta-only. Ensure a beta version block - # is present in provider.yaml. 
Do not use if an ancestor field (or the overall - # resource) is already marked as beta-only. - # min_version: beta + min_version: beta + immutable: true + required: true + output: true + conflicts: + - field_one + - nested_object.0.nested_field + exactly_one_of: + - field_one + - nested_object.0.nested_field + +``` + +Replace `String` in the field type with one of the following options: - # If true, the field (and any subfields) are considered immutable - that is, - # only settable on create. If unset or false, the field is still considered - # immutable if any ancestor field (or the overall resource) is immutable, - # unless `update_url` is set. - # immutable: true - - # If set, changes to the field's value trigger a separate call to a specific - # API method for updating the field's value. The field is not considered - # immutable even if an ancestor field (or the overall resource) is immutable. - # Terraform field names enclosed in double curly braces are replaced with the - # field values from the resource at runtime. - # update_url: 'projects/{{project}}/locations/{{location}}/resourcenames/{{name}}/setFieldName' - - # If update_url is also set, overrides the verb used to update this specific - # field. Allowed values: :POST, :PUT, :PATCH. Default: Resource's update_verb - # (which defaults to :PUT if unset). - # update_verb: :POST - - # If true, the field is required. If unset or false, the field is optional. - # required: true - - # If true, the field is output-only - that is, it cannot be configured by the - # user. If unset or false, the field is configurable. - # output: true - - # If true, the provider sets the field's value in the resource state based only - # on the user's configuration. If false or unset, the provider sets the field's - # value in the resource state based on the API response. Only use this attribute - # if the field cannot be read from GCP due to either API or provider constraints. 
- # ignore_read: true - - # Sets a client-side default value for the field. This should be used if the - # API has a default value that applies in all cases and is stable. Removing - # or changing a default value is a breaking change. If unset, the field defaults - # to an "empty" value (such as zero, false, or an empty string). - # default_value: DEFAULT_VALUE - - # If true, and the field is either not set or set to an "empty" value (such as - # zero, false, or empty strings), the provider accepts any value returned from - # the API as the value for the field. If false, and the field is either not set - # or set to an "empty" value, the provider treats the field's `default_value` - # as the value for the field and shows a diff if the API returns any other - # value for the field. This attribute is useful for complex or - # frequently-changed API-side defaults, but provides less useful information at - # plan time than `default_value` and causes the provider to ignore user - # configurations that explicitly set the field to an "empty" value. - # `default_from_api` and `send_empty_value` cannot both be true on the same field. - # default_from_api: true - - # If true, the provider sends "empty" values (such as zero, false, or empty - # strings) to the API if set explicitly in the user's configuration. If false, - # "empty" values cause the field to be omitted entirely from the API request. - # This attribute is useful for fields where the API would behave differently - # for an "empty" value vs no value for a particular field - for example, - # boolean fields that have an API-side default of true. - # `send_empty_value` and `default_from_api` cannot both be true on the same field. - # send_empty_value: true - - # Specifies a list of fields (excluding the current field) that cannot be - # specified at the same time as the current field. Must be set separately on - # all listed fields. 
- # conflicts: - # - field_one - # - nested_object.0.nested_field - - # Specifies a list of fields (including the current field) that cannot be - # specified at the same time (but at least one of which must be set). Must be - # set separately on all listed fields. - # exactly_one_of: - # - field_one - # - nested_object.0.nested_field - - # Enum only. Sets allowed values as ruby "literal constants" (prefixed with a - # colon). If the allowed values change frequently, use a String field instead - # to allow better forwards-compatibility, and link to API documentation - # stating the current allowed values in the String field's description. Do not - # include UNSPECIFIED values in this list. - # values: - # - :VALUE_ONE - # - :VALUE_TWO - - # Array only. Sets the expected type of the items in the array. Primitives - # should use the name of the primitive class as a string; other types should - # define the attributes of the nested type. - # item_type: Api::Type::String - # item_type: !ruby/object:Api::Type::Enum - # name: 'required but unused' - # description: 'required but unused' - # values: - # - :VALUE_ONE - # - :VALUE_TWO - - # NestedObject only. Defines fields nested inside the current field. 
- # properties: - # - !ruby/object:Api::Type::String - # name: 'FIELD_NAME' - # description: | - # MULTI_LINE_FIELD_DESCRIPTION +- `String` +- `Integer` +- `Boolean` +- `Double` +- `KeyValuePairs` (string -> string map) +- `KeyValueLabels` (for standard resource 'labels' field) +- `KeyValueAnnotations` (for standard resource 'annotations' field) +{{< /tab >}} +{{< tab "Enum" >}} +```yaml +- !ruby/object:Api::Type::Enum + name: 'API_FIELD_NAME' + description: | + MULTILINE_FIELD_DESCRIPTION + min_version: beta + immutable: true + required: true + output: true + conflicts: + - field_one + - nested_object.0.nested_field + exactly_one_of: + - field_one + - nested_object.0.nested_field + values: + - :VALUE_ONE + - :VALUE_TWO +``` +{{< /tab >}} +{{< tab "ResourceRef" >}} +```yaml +- !ruby/object:Api::Type::ResourceRef + name: 'API_FIELD_NAME' + description: | + MULTILINE_FIELD_DESCRIPTION + min_version: beta + immutable: true + required: true + output: true + conflicts: + - field_one + - nested_object.0.nested_field + exactly_one_of: + - field_one + - nested_object.0.nested_field + resource: 'ResourceName' + imports: 'name' ``` +{{< /tab >}} +{{< tab "Array" >}} +```yaml +- !ruby/object:Api::Type::Array + name: 'API_FIELD_NAME' + description: | + MULTILINE_FIELD_DESCRIPTION + min_version: beta + immutable: true + required: true + output: true + conflicts: + - field_one + - nested_object.0.nested_field + exactly_one_of: + - field_one + - nested_object.0.nested_field + # Array of primitives + item_type: Api::Type::String + + # Array of nested objects + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'FIELD_NAME' + description: | + MULTI_LINE_FIELD_DESCRIPTION +``` +{{< /tab >}} +{{< tab "NestedObject" >}} +```yaml +- !ruby/object:Api::Type::Array + name: 'API_FIELD_NAME' + description: | + MULTILINE_FIELD_DESCRIPTION + min_version: beta + immutable: true + required: true + output: true + conflicts: + - field_one + - 
nested_object.0.nested_field + exactly_one_of: + - field_one + - nested_object.0.nested_field + properties: + - !ruby/object:Api::Type::String + name: 'FIELD_NAME' + description: | + MULTI_LINE_FIELD_DESCRIPTION +``` +{{< /tab >}} +{{< /tabs >}} + 2. Modify the field configuration according to the API documentation and behavior. -3. Delete all remaining comments in the field configuration (including attribute descriptions) that were copied from the above template. -> **Note:** The template includes the most commonly-used fields. For a comprehensive reference, see [Field reference ↗]({{}}). +> **Note:** The templates in this section only include the most commonly-used fields. For a comprehensive reference, see [MMv1 field reference]({{}}). For information about modifying the values sent and received for a field, see [Modify the API request or response]({{}}). {{< /tab >}} {{< tab "Handwritten" >}} 1. Add the field to the handwritten resource's schema. diff --git a/docs/content/develop/run-tests.md b/docs/content/develop/run-tests.md index afdc7fcf4c10..5c6b655991a2 100644 --- a/docs/content/develop/run-tests.md +++ b/docs/content/develop/run-tests.md @@ -92,7 +92,7 @@ aliases: 1. Run acceptance tests for only modified resources. (Full test runs can take over 9 hours.) See [Go's documentation](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for more information about `-run` and other flags. ```bash - make testacc TEST=./google-beta TESTARGS='-run=TestAccContainerNodePool' + make testacc TEST=./google-beta/services/container TESTARGS='-run=TestAccContainerNodePool' ``` @@ -100,7 +100,7 @@ aliases: 1. Optional: Save verbose test output to a file for analysis. ```bash - TF_LOG=DEBUG make testacc TEST=./google-beta TESTARGS='-run=TestAccContainerNodePool_basic' > output.log + TF_LOG=DEBUG make testacc TEST=./google-beta/services/container TESTARGS='-run=TestAccContainerNodePool_basic' > output.log ``` 1. Optional: Debug tests with [Delve](https://github.com/go-delve/delve). 
See [`dlv test` documentation](https://github.com/go-delve/delve/blob/master/Documentation/usage/dlv_test.md) for information about available flags. diff --git a/docs/content/reference/field-reference.md b/docs/content/reference/field-reference.md deleted file mode 100644 index 5e75b08687d5..000000000000 --- a/docs/content/reference/field-reference.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "Field YAML reference ↗" -weight: 30 -bookHref: "https://github.com/GoogleCloudPlatform/magic-modules/blob/main/mmv1/api/type.rb" ---- -FORCE MENU RENDER \ No newline at end of file diff --git a/mmv1/api/resource.rb b/mmv1/api/resource.rb index 3b975c2bd0c0..4ccec699fb8d 100644 --- a/mmv1/api/resource.rb +++ b/mmv1/api/resource.rb @@ -201,6 +201,10 @@ module Properties # mmv1/templates/terraform/state_migrations/ # used for maintaining state stability with resources first provisioned on older api versions. attr_reader :schema_version + # From this schema version on, state_upgrader code is generated for the resource. + # When unset, state_upgrade_base_schema_version defaults to 0. + # Normally, it does not need to be set. 
+ attr_reader :state_upgrade_base_schema_version attr_reader :state_upgraders # This block inserts the named function and its attribute into the # resource schema -- the code for the migrate_state function must @@ -217,6 +221,10 @@ module Properties # public ca external account keys attr_reader :skip_read + # Set to true for resources that wish to disable automatic generation of default provider + # value customdiff functions + attr_reader :skip_default_cdiff + # This enables resources that get their project via a reference to a different resource # instead of a project field to use User Project Overrides attr_reader :supports_indirect_user_project_override @@ -318,10 +326,12 @@ def validate check :error_retry_predicates, type: Array, item_type: String check :error_abort_predicates, type: Array, item_type: String check :schema_version, type: Integer + check :state_upgrade_base_schema_version, type: Integer, default: 0 check :state_upgraders, type: :boolean, default: false check :migrate_state, type: String check :skip_delete, type: :boolean, default: false check :skip_read, type: :boolean, default: false + check :skip_default_cdiff, type: :boolean, default: false check :supports_indirect_user_project_override, type: :boolean, default: false check :legacy_long_form_project, type: :boolean, default: false check :read_error_transform, type: String @@ -342,6 +352,10 @@ def all_properties ((@properties || []) + (@parameters || [])) end + def properties_with_excluded + @properties || [] + end + def properties (@properties || []).reject(&:exclude) end @@ -385,8 +399,13 @@ def all_resourcerefs # At Create, they have no value but they can just be read in anyways, and after a Read # they will need to be set in every Update. 
def settable_properties - all_user_properties.reject { |v| v.output && !v.is_a?(Api::Type::Fingerprint) } - .reject(&:url_param_only) + props = all_user_properties.reject do |v| + v.output && !v.is_a?(Api::Type::Fingerprint) && !v.is_a?(Api::Type::KeyValueEffectiveLabels) + end + props = props.reject(&:url_param_only) + props.reject do |v| + v.is_a?(Api::Type::KeyValueLabels) || v.is_a?(Api::Type::KeyValueAnnotations) + end end # Properties that will be returned in the API body @@ -439,6 +458,106 @@ def decoder? !@transport&.decoder.nil? end + def add_labels_related_fields(props, parent) + props.each do |p| + if p.is_a? Api::Type::KeyValueLabels + add_labels_fields(props, parent, p) + elsif p.is_a? Api::Type::KeyValueAnnotations + add_annotations_fields(props, parent, p) + elsif (p.is_a? Api::Type::NestedObject) && !p.all_properties.nil? + p.properties = add_labels_related_fields(p.all_properties, p) + end + end + props + end + + def add_labels_fields(props, parent, labels) + # The effective_labels field is used to write to API, instead of the labels field. + labels.ignore_write = true + labels.description = "#{labels.description}\n\n#{get_labels_field_note(labels.name)}" + + @custom_diff ||= [] + if parent.nil? || parent.flatten_object + @custom_diff.append('tpgresource.SetLabelsDiff') + elsif parent.name == 'metadata' + @custom_diff.append('tpgresource.SetMetadataLabelsDiff') + end + + props << build_terraform_labels_field('labels', labels) + props << build_effective_labels_field('labels', labels) + end + + def add_annotations_fields(props, parent, annotations) + # The effective_annotations field is used to write to API, + # instead of the annotations field. + annotations.ignore_write = true + note = get_labels_field_note(annotations.name) + annotations.description = "#{annotations.description}\n\n#{note}" + + @custom_diff ||= [] + if parent.nil? 
+ @custom_diff.append('tpgresource.SetAnnotationsDiff') + elsif parent.name == 'metadata' + @custom_diff.append('tpgresource.SetMetadataAnnotationsDiff') + end + + props << build_effective_labels_field('annotations', annotations) + end + + def build_effective_labels_field(name, labels) + description = "All of #{name} (key/value pairs)\ + present on the resource in GCP, including the #{name} configured through Terraform,\ + other clients and services." + + Api::Type::KeyValueEffectiveLabels.new( + name: "effective#{name.capitalize}", + output: true, + api_name: name, + description:, + min_version: labels.field_min_version, + update_verb: labels.update_verb, + update_url: labels.update_url, + immutable: labels.immutable + ) + end + + def build_terraform_labels_field(name, labels) + description = "The combination of #{name} configured directly on the resource + and default #{name} configured on the provider." + + Api::Type::KeyValueTerraformLabels.new( + name: "terraform#{name.capitalize}", + output: true, + api_name: name, + description:, + min_version: labels.field_min_version, + ignore_write: true, + update_url: labels.update_url, + immutable: labels.immutable + ) + end + + # Return labels fields that should be added to ImportStateVerifyIgnore + def ignore_read_labels_fields(props) + fields = [] + props.each do |p| + if (p.is_a? Api::Type::KeyValueLabels) || + (p.is_a? Api::Type::KeyValueTerraformLabels) || + (p.is_a? Api::Type::KeyValueAnnotations) + fields << p.terraform_lineage + elsif (p.is_a? Api::Type::NestedObject) && !p.all_properties.nil? + fields.concat(ignore_read_labels_fields(p.all_properties)) + end + end + fields + end + + def get_labels_field_note(title) + "**Note**: This field is non-authoritative, and will only manage the #{title} present " \ +"in your configuration. +Please refer to the field `effective_#{title}` for all of the #{title} present on the resource." 
+ end + # ==================== # Version-related methods # ==================== diff --git a/mmv1/api/type.rb b/mmv1/api/type.rb index 2d0900b1fd7d..f050ead7652a 100644 --- a/mmv1/api/type.rb +++ b/mmv1/api/type.rb @@ -23,7 +23,7 @@ module Fields include Api::Object::Named::Properties attr_reader :default_value - attr_reader :description + attr_accessor :description attr_reader :exclude # Add a deprecation message for a field that's been deprecated in the API @@ -36,13 +36,25 @@ module Fields # a different version. attr_reader :removed_message - attr_reader :output # If set value will not be sent to server on sync - attr_reader :immutable # If set to true value is used only on creation + # If set value will not be sent to server on sync. + # For nested fields, this also needs to be set on each descendant (ie. self, + # child, etc.). + attr_reader :output + + # If set to true, changes in the field's value require recreating the + # resource. + # For nested fields, this only applies at the current level. This means + # it should be explicitly added to each field that needs the ForceNew + # behavior. + attr_reader :immutable # url_param_only will not send the field in the resource body and will # not attempt to read the field from the API response. # NOTE - this doesn't work for nested fields attr_reader :url_param_only + + # For nested fields, this only applies within the parent. + # For example, an optional parent can contain a required child. attr_reader :required # [Additional query Parameters to append to GET calls. @@ -121,6 +133,9 @@ module Fields # if true, then we get the default value from the Google API if no value # is set in the terraform configuration for this field. # It translates to setting the field to Computed & Optional in the schema. + # For nested fields, this only applies at the current level. This means + # it should be explicitly added to each field that needs the defaulting + # behavior. 
attr_reader :default_from_api # https://github.com/hashicorp/terraform/pull/20837 @@ -266,6 +281,14 @@ def lineage "#{__parent.lineage}.#{name&.underscore}" end + # Prints the access path of the field in the configuration eg: metadata.0.labels + # The only intended purpose is to get the value of the labels field by calling d.Get(). + def terraform_lineage + return name&.underscore if __parent.nil? || __parent.flatten_object + + "#{__parent.terraform_lineage}.0.#{name&.underscore}" + end + def to_json(opts = nil) # ignore fields that will contain references to parent resources and # those which will be added later @@ -728,6 +751,8 @@ def properties @properties.reject(&:exclude) end + attr_writer :properties + def nested_properties properties end @@ -755,6 +780,109 @@ def exclude_if_not_in_version!(version) # simpler property to generate and means we can avoid conditional logic # in Map. class KeyValuePairs < Composite + # Ignore writing the "effective_labels" and "effective_annotations" fields to API. + attr_accessor :ignore_write + + def initialize(name: nil, output: nil, api_name: nil, description: nil, min_version: nil, + ignore_write: nil, update_verb: nil, update_url: nil, immutable: nil) + super() + + @name = name + @output = output + @api_name = api_name + @description = description + @min_version = min_version + @ignore_write = ignore_write + @update_verb = update_verb + @update_url = update_url + @immutable = immutable + end + + def validate + super + check :ignore_write, type: :boolean, default: false + + return if @__resource.__product.nil? + + product_name = @__resource.__product.name + resource_name = @__resource.name + + if lineage == 'labels' || lineage == 'metadata.labels' || + lineage == 'configuration.labels' + if !(is_a? 
Api::Type::KeyValueLabels) && + # The label value must be empty string, so skip this resource + !(product_name == 'CloudIdentity' && resource_name == 'Group') && + + # The "labels" field has type Array, so skip this resource + !(product_name == 'DeploymentManager' && resource_name == 'Deployment') && + + # https://github.com/hashicorp/terraform-provider-google/issues/16219 + !(product_name == 'Edgenetwork' && resource_name == 'Network') && + + # https://github.com/hashicorp/terraform-provider-google/issues/16219 + !(product_name == 'Edgenetwork' && resource_name == 'Subnet') && + + # "userLabels" is the resource labels field + !(product_name == 'Monitoring' && resource_name == 'NotificationChannel') && + + # The "labels" field has type Array, so skip this resource + !(product_name == 'Monitoring' && resource_name == 'MetricDescriptor') + raise "Please use type KeyValueLabels for field #{lineage} " \ + "in resource #{product_name}/#{resource_name}" + end + elsif is_a? Api::Type::KeyValueLabels + raise "Please don't use type KeyValueLabels for field #{lineage} " \ + "in resource #{product_name}/#{resource_name}" + end + + if lineage == 'annotations' || lineage == 'metadata.annotations' + if !(is_a? Api::Type::KeyValueAnnotations) && + # The "annotations" field has "output: true", so skip this eap resource + !(product_name == 'Gkeonprem' && resource_name == 'BareMetalAdminClusterEnrollment') + raise "Please use type KeyValueAnnotations for field #{lineage} " \ + "in resource #{product_name}/#{resource_name}" + end + elsif is_a? Api::Type::KeyValueAnnotations + raise "Please don't use type KeyValueAnnotations for field #{lineage} " \ + "in resource #{product_name}/#{resource_name}" + end + end + + def field_min_version + @min_version + end + end + + # An array of string -> string key -> value pairs used specifically for the "labels" field. + # The field name with this type should be "labels" literally. 
+ class KeyValueLabels < KeyValuePairs + def validate + super + return unless @name != 'labels' + + raise "The field #{name} has the type KeyValueLabels, but the field name is not 'labels'!" + end + end + + # An array of string -> string key -> value pairs used for the "terraform_labels" field. + class KeyValueTerraformLabels < KeyValuePairs + end + + # An array of string -> string key -> value pairs used for the "effective_labels" + # and "effective_annotations" fields. + class KeyValueEffectiveLabels < KeyValuePairs + end + + # An array of string -> string key -> value pairs used specifically for the "annotations" field. + # The field name with this type should be "annotations" literally. + class KeyValueAnnotations < KeyValuePairs + def validate + super + return unless @name != 'annotations' + + raise "The field #{name} has the type KeyValueAnnotations,\ + but the field name is not 'annotations'!" + end end # Map from string keys -> nested object entries diff --git a/mmv1/compiler.rb b/mmv1/compiler.rb index 33ec546c4ad8..bb5e4c405a68 100755 --- a/mmv1/compiler.rb +++ b/mmv1/compiler.rb @@ -32,6 +32,7 @@ require 'provider/terraform_kcc' require 'provider/terraform_oics' require 'provider/terraform_tgc' +require 'provider/terraform_tgc_cai2hcl' require 'provider/tflint' products_to_generate = nil @@ -224,6 +225,9 @@ end res_yaml = File.read(file_path) resource = Api::Compiler.new(res_yaml).run + resource.properties = resource.add_labels_related_fields( + resource.properties_with_excluded, nil + ) resource.validate resources.push(resource) end @@ -250,6 +254,9 @@ res_yaml = res_yaml.gsub('{{override_path}}', override_dir) end resource = Api::Compiler.new(res_yaml).run + resource.properties = resource.add_labels_related_fields( + resource.properties_with_excluded, nil + ) resource.validate resources.push(resource) end @@ -278,6 +285,7 @@ 'oics' => Provider::TerraformOiCS, 'validator' => Provider::TerraformGoogleConversion, 'tgc' => 
Provider::TerraformGoogleConversion, + 'tgc_cai2hcl' => Provider::CaiToTerraformConversion, 'kcc' => Provider::TerraformKCC, 'tflint' => Provider::TFLint } diff --git a/mmv1/products/accesscontextmanager/AccessLevel.yaml b/mmv1/products/accesscontextmanager/AccessLevel.yaml index 471c23829f69..e569f44574f4 100644 --- a/mmv1/products/accesscontextmanager/AccessLevel.yaml +++ b/mmv1/products/accesscontextmanager/AccessLevel.yaml @@ -243,6 +243,23 @@ properties: countries/regions. Format: A valid ISO 3166-1 alpha-2 code. item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'vpcNetworkSources' + description: 'The request must originate from one of the provided VPC networks in Google Cloud. Cannot specify this field together with `ip_subnetworks`.' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'vpcSubnetwork' + description: 'Sub networks within a VPC network.' + properties: + - !ruby/object:Api::Type::String + name: 'network' + required: true + description: 'Required. Network name to be allowed by this Access Level. Networks of foreign organizations requires `compute.network.get` permission to be granted to caller.' + - !ruby/object:Api::Type::Array + name: 'vpcIpSubnetworks' + description: 'CIDR block IP subnetwork specification. Must be IPv4.' + item_type: Api::Type::String - !ruby/object:Api::Type::NestedObject name: 'custom' description: | diff --git a/mmv1/products/accesscontextmanager/AccessLevelCondition.yaml b/mmv1/products/accesscontextmanager/AccessLevelCondition.yaml index e8fcca417b03..13106afd628a 100644 --- a/mmv1/products/accesscontextmanager/AccessLevelCondition.yaml +++ b/mmv1/products/accesscontextmanager/AccessLevelCondition.yaml @@ -212,3 +212,20 @@ properties: countries/regions. Format: A valid ISO 3166-1 alpha-2 code. 
item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'vpcNetworkSources' + description: 'The request must originate from one of the provided VPC networks in Google Cloud. Cannot specify this field together with `ip_subnetworks`.' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'vpcSubnetwork' + description: 'Sub networks within a VPC network.' + properties: + - !ruby/object:Api::Type::String + name: 'network' + required: true + description: 'Required. Network name to be allowed by this Access Level. Networks of foreign organizations requires `compute.network.get` permission to be granted to caller.' + - !ruby/object:Api::Type::Array + name: 'vpcIpSubnetworks' + description: 'CIDR block IP subnetwork specification. Must be IPv4.' + item_type: Api::Type::String diff --git a/mmv1/products/accesscontextmanager/AccessLevels.yaml b/mmv1/products/accesscontextmanager/AccessLevels.yaml index 2a45c3cc0563..ab2c88492f72 100644 --- a/mmv1/products/accesscontextmanager/AccessLevels.yaml +++ b/mmv1/products/accesscontextmanager/AccessLevels.yaml @@ -234,6 +234,23 @@ properties: countries/regions. Format: A valid ISO 3166-1 alpha-2 code. item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'vpcNetworkSources' + description: 'The request must originate from one of the provided VPC networks in Google Cloud. Cannot specify this field together with `ip_subnetworks`.' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'vpcSubnetwork' + description: 'Sub networks within a VPC network.' + properties: + - !ruby/object:Api::Type::String + name: 'network' + required: true + description: 'Required. Network name to be allowed by this Access Level. Networks of foreign organizations requires `compute.network.get` permission to be granted to caller.' 
+ - !ruby/object:Api::Type::Array + name: 'vpcIpSubnetworks' + description: 'CIDR block IP subnetwork specification. Must be IPv4.' + item_type: Api::Type::String - !ruby/object:Api::Type::NestedObject name: 'custom' description: | diff --git a/mmv1/products/accesscontextmanager/ServicePerimeter.yaml b/mmv1/products/accesscontextmanager/ServicePerimeter.yaml index 96c2bf0b5b86..b79c83c666cf 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeter.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeter.yaml @@ -157,6 +157,7 @@ properties: - status.0.access_levels - status.0.restricted_services item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::Array name: 'accessLevels' description: | @@ -175,6 +176,7 @@ properties: - status.0.access_levels - status.0.restricted_services item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::Array name: 'restrictedServices' description: | @@ -236,6 +238,7 @@ properties: - !ruby/object:Api::Type::Array name: 'identities' item_type: Api::Type::String + is_set: true description: | A list of identities that are allowed access through this ingress policy. Should be in the format of email address. The email address should represent @@ -275,6 +278,7 @@ properties: - !ruby/object:Api::Type::Array name: 'resources' item_type: Api::Type::String + is_set: true description: | A list of resources, currently only projects in the form `projects/`, protected by this `ServicePerimeter` @@ -342,12 +346,28 @@ properties: - :ANY_IDENTITY - :ANY_USER_ACCOUNT - :ANY_SERVICE_ACCOUNT + - !ruby/object:Api::Type::Array + name: 'sources' + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'accessLevel' + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' 
+ - !ruby/object:Api::Type::Enum + name: 'sourceRestriction' + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + values: + - :SOURCE_RESTRICTION_UNSPECIFIED + - :SOURCE_RESTRICTION_ENABLED + - :SOURCE_RESTRICTION_DISABLED - !ruby/object:Api::Type::Array name: 'identities' description: | A list of identities that are allowed access through this `EgressPolicy`. Should be in the format of email address. The email address should represent individual user or service account only. + is_set: true item_type: Api::Type::String - !ruby/object:Api::Type::NestedObject name: 'egressTo' @@ -357,6 +377,7 @@ properties: properties: - !ruby/object:Api::Type::Array name: 'resources' + is_set: true item_type: Api::Type::String description: | A list of resources, currently only projects in the form @@ -366,6 +387,7 @@ properties: the perimeter. - !ruby/object:Api::Type::Array name: 'externalResources' + is_set: true item_type: Api::Type::String description: | A list of external resources that are allowed to be accessed. A request @@ -423,6 +445,7 @@ properties: - spec.0.access_levels - spec.0.restricted_services item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::Array name: 'accessLevels' description: | @@ -441,6 +464,7 @@ properties: - spec.0.access_levels - spec.0.restricted_services item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::Array name: 'restrictedServices' description: | @@ -454,6 +478,7 @@ properties: - spec.0.access_levels - spec.0.restricted_services item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::NestedObject name: 'vpcAccessibleServices' description: | @@ -471,6 +496,7 @@ properties: The list of APIs usable within the Service Perimeter. Must be empty unless `enableRestriction` is True. 
item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::Array name: 'ingressPolicies' description: | @@ -500,6 +526,7 @@ properties: - !ruby/object:Api::Type::Array name: 'identities' item_type: Api::Type::String + is_set: true description: | A list of identities that are allowed access through this ingress policy. Should be in the format of email address. The email address should represent @@ -539,6 +566,7 @@ properties: - !ruby/object:Api::Type::Array name: 'resources' item_type: Api::Type::String + is_set: true description: | A list of resources, currently only projects in the form `projects/`, protected by this `ServicePerimeter` @@ -606,6 +634,21 @@ properties: - :ANY_IDENTITY - :ANY_USER_ACCOUNT - :ANY_SERVICE_ACCOUNT + - !ruby/object:Api::Type::Array + name: 'sources' + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'accessLevel' + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - !ruby/object:Api::Type::Enum + name: 'sourceRestriction' + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + values: + - :SOURCE_RESTRICTION_UNSPECIFIED + - :SOURCE_RESTRICTION_ENABLED + - :SOURCE_RESTRICTION_DISABLED - !ruby/object:Api::Type::Array name: 'identities' description: | @@ -613,6 +656,7 @@ properties: Should be in the format of email address. The email address should represent individual user or service account only. 
item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::NestedObject name: 'egressTo' description: | @@ -622,6 +666,7 @@ properties: - !ruby/object:Api::Type::Array name: 'resources' item_type: Api::Type::String + is_set: true description: | A list of resources, currently only projects in the form `projects/`, that match this to stanza. A request matches @@ -631,6 +676,7 @@ properties: - !ruby/object:Api::Type::Array name: 'externalResources' item_type: Api::Type::String + is_set: true description: | A list of external resources that are allowed to be accessed. A request matches if it contains an external resource in this list (Example: diff --git a/mmv1/products/accesscontextmanager/ServicePerimeterEgressPolicy.yaml b/mmv1/products/accesscontextmanager/ServicePerimeterEgressPolicy.yaml index 8ff821cb691b..5e46e6770c0d 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeterEgressPolicy.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeterEgressPolicy.yaml @@ -79,6 +79,21 @@ properties: Should be in the format of email address. The email address should represent individual user or service account only. item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'sources' + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'accessLevel' + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - !ruby/object:Api::Type::Enum + name: 'sourceRestriction' + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' 
+ values: + - :SOURCE_RESTRICTION_UNSPECIFIED + - :SOURCE_RESTRICTION_ENABLED + - :SOURCE_RESTRICTION_DISABLED - !ruby/object:Api::Type::NestedObject name: 'egressTo' description: | diff --git a/mmv1/products/accesscontextmanager/ServicePerimeters.yaml b/mmv1/products/accesscontextmanager/ServicePerimeters.yaml index 72a41e289a0d..2b941289d41a 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeters.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeters.yaml @@ -57,7 +57,6 @@ properties: name: 'servicePerimeters' description: | The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy. - is_set: true item_type: !ruby/object:Api::Type::NestedObject properties: - !ruby/object:Api::Type::String @@ -133,6 +132,7 @@ properties: # - status.0.access_levels # - status.0.restricted_services item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::Array name: 'accessLevels' description: | @@ -153,6 +153,7 @@ properties: # - status.0.access_levels # - status.0.restricted_services item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::Array name: 'restrictedServices' description: | @@ -194,6 +195,7 @@ properties: have multiple `IngressPolicies`, each of which is evaluated separately. Access is granted if any `Ingress Policy` grants it. Must be empty for a perimeter bridge. + is_set: true item_type: !ruby/object:Api::Type::NestedObject properties: - !ruby/object:Api::Type::NestedObject @@ -215,6 +217,7 @@ properties: - :ANY_SERVICE_ACCOUNT - !ruby/object:Api::Type::Array name: 'identities' + is_set: true item_type: Api::Type::String description: | A list of identities that are allowed access through this ingress policy. 
@@ -255,6 +258,7 @@ properties: - !ruby/object:Api::Type::Array name: 'resources' item_type: Api::Type::String + is_set: true description: | A list of resources, currently only projects in the form `projects/`, protected by this `ServicePerimeter` @@ -328,7 +332,23 @@ properties: A list of identities that are allowed access through this `EgressPolicy`. Should be in the format of email address. The email address should represent individual user or service account only. + is_set: true item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'sources' + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'accessLevel' + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - !ruby/object:Api::Type::Enum + name: 'sourceRestriction' + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + values: + - :SOURCE_RESTRICTION_UNSPECIFIED + - :SOURCE_RESTRICTION_ENABLED + - :SOURCE_RESTRICTION_DISABLED - !ruby/object:Api::Type::NestedObject name: 'egressTo' description: | @@ -338,6 +358,7 @@ properties: - !ruby/object:Api::Type::Array name: 'resources' item_type: Api::Type::String + is_set: true description: | A list of resources, currently only projects in the form `projects/`, that match this to stanza. A request matches @@ -347,6 +368,7 @@ properties: - !ruby/object:Api::Type::Array name: 'externalResources' item_type: Api::Type::String + is_set: true description: | A list of external resources that are allowed to be accessed. 
A request matches if it contains an external resource in this list (Example: @@ -404,9 +426,11 @@ properties: # - spec.0.resources # - spec.0.access_levels # - spec.0.restricted_services + is_set: true item_type: Api::Type::String - !ruby/object:Api::Type::Array name: 'accessLevels' + is_set: true description: | A list of AccessLevel resource names that allow resources within the ServicePerimeter to be accessed from the internet. @@ -440,6 +464,7 @@ properties: # - spec.0.access_levels # - spec.0.restricted_services item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::NestedObject name: 'vpcAccessibleServices' description: | @@ -457,6 +482,7 @@ properties: The list of APIs usable within the Service Perimeter. Must be empty unless `enableRestriction` is True. item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::Array name: 'ingressPolicies' description: | @@ -485,6 +511,7 @@ properties: - :ANY_SERVICE_ACCOUNT - !ruby/object:Api::Type::Array name: 'identities' + is_set: true item_type: Api::Type::String description: | A list of identities that are allowed access through this ingress policy. @@ -525,6 +552,7 @@ properties: - !ruby/object:Api::Type::Array name: 'resources' item_type: Api::Type::String + is_set: true description: | A list of resources, currently only projects in the form `projects/`, protected by this `ServicePerimeter` @@ -599,6 +627,22 @@ properties: Should be in the format of email address. The email address should represent individual user or service account only. item_type: Api::Type::String + is_set: true + - !ruby/object:Api::Type::Array + name: 'sources' + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'accessLevel' + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' 
+ - !ruby/object:Api::Type::Enum + name: 'sourceRestriction' + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + values: + - :SOURCE_RESTRICTION_UNSPECIFIED + - :SOURCE_RESTRICTION_ENABLED + - :SOURCE_RESTRICTION_DISABLED - !ruby/object:Api::Type::NestedObject name: 'egressTo' description: | @@ -608,6 +652,7 @@ properties: - !ruby/object:Api::Type::Array name: 'resources' item_type: Api::Type::String + is_set: true description: | A list of resources, currently only projects in the form `projects/`, that match this to stanza. A request matches @@ -617,6 +662,7 @@ properties: - !ruby/object:Api::Type::Array name: 'externalResources' item_type: Api::Type::String + is_set: true description: | A list of external resources that are allowed to be accessed. A request matches if it contains an external resource in this list (Example: diff --git a/mmv1/products/activedirectory/Domain.yaml b/mmv1/products/activedirectory/Domain.yaml index 8b0cc3e78b33..7ca646ef3565 100644 --- a/mmv1/products/activedirectory/Domain.yaml +++ b/mmv1/products/activedirectory/Domain.yaml @@ -85,7 +85,7 @@ properties: description: 'The unique name of the domain using the format: `projects/{project}/locations/global/domains/{domainName}`.' 
- - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'Resource labels that can contain user-provided metadata' - !ruby/object:Api::Type::Array diff --git a/mmv1/products/activedirectory/Peering.yaml b/mmv1/products/activedirectory/Peering.yaml index b98c94b7ac59..b760bb8ef788 100644 --- a/mmv1/products/activedirectory/Peering.yaml +++ b/mmv1/products/activedirectory/Peering.yaml @@ -73,7 +73,7 @@ properties: output: true description: | Unique name of the peering in this scope including projects and location using the form: projects/{projectId}/locations/global/peerings/{peeringId}. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'Resource labels that can contain user-provided metadata' - !ruby/object:Api::Type::String diff --git a/mmv1/products/alloydb/Backup.yaml b/mmv1/products/alloydb/Backup.yaml index 3d4ac4cbf1c8..a81dbc2d1387 100644 --- a/mmv1/products/alloydb/Backup.yaml +++ b/mmv1/products/alloydb/Backup.yaml @@ -37,29 +37,57 @@ examples: - !ruby/object:Provider::Terraform::Examples name: 'alloydb_backup_basic' primary_resource_id: 'default' + vars: + alloydb_backup_id: 'alloydb-backup' + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_backup_basic_test' + primary_resource_id: 'default' vars: alloydb_backup_id: 'alloydb-backup' alloydb_cluster_name: 'alloydb-cluster' alloydb_instance_name: 'alloydb-instance' network_name: 'alloydb-network' test_vars_overrides: - network_name: 'acctest.BootstrapSharedTestNetwork(t, "alloydb-backup-basic")' + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' ignore_read_extra: - 'reconciling' - 'update_time' + skip_docs: true - 
!ruby/object:Provider::Terraform::Examples name: 'alloydb_backup_full' primary_resource_id: 'default' + vars: + alloydb_backup_id: 'alloydb-backup' + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_backup_full_test' + primary_resource_id: 'default' vars: alloydb_backup_id: 'alloydb-backup' alloydb_cluster_name: 'alloydb-cluster' alloydb_instance_name: 'alloydb-instance' network_name: 'alloydb-network' test_vars_overrides: - network_name: 'acctest.BootstrapSharedTestNetwork(t, "alloydb-backup-full")' + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' ignore_read_extra: - 'reconciling' - 'update_time' + # https://github.com/hashicorp/terraform-provider-google/issues/16231 + skip_vcr: true + skip_docs: true custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/alloydb_backup.erb parameters: @@ -83,52 +111,71 @@ properties: output: true description: | Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backupId} + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User-settable and human-readable display name for the Backup. - !ruby/object:Api::Type::String name: 'uid' output: true description: | Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. - - !ruby/object:Api::Type::String - name: 'clusterName' - description: - 'The full resource name of the backup source cluster (e.g., - projects/{project}/locations/{location}/clusters/{clusterId}).' 
- required: true - immutable: true - diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' - - !ruby/object:Api::Type::KeyValuePairs - name: 'labels' - description: 'User-defined labels for the alloydb backup.' - !ruby/object:Api::Type::Time name: 'createTime' - description: | - Time the Backup was created in UTC. output: true + description: | + Output only. Create time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". - !ruby/object:Api::Type::Time name: 'updateTime' + output: true description: | - Time the Backup was updated in UTC. + Output only. Update time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::Time + name: 'deleteTime' output: true + description: | + Output only. Delete time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' + description: | + User-defined labels for the alloydb backup. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - !ruby/object:Api::Type::String name: 'state' output: true - description: | - The current state of the backup. + description: Output only. The current state of the backup. + - !ruby/object:Api::Type::Enum + name: 'type' + default_from_api: true + description: 'The backup type, which suggests the trigger for the backup.' + values: + - :TYPE_UNSPECIFIED + - :ON_DEMAND + - :AUTOMATED + - :CONTINUOUS - !ruby/object:Api::Type::String name: 'description' - description: | - User-provided description of the backup. + description: 'User-provided description of the backup.' 
+ - !ruby/object:Api::Type::String + name: 'clusterUid' + output: true + description: 'Output only. The system-generated UID of the cluster which was used to create this resource.' + - !ruby/object:Api::Type::String + name: 'clusterName' + required: true immutable: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + description: + 'The full resource name of the backup source cluster (e.g., + projects/{project}/locations/{location}/clusters/{clusterId}).' - !ruby/object:Api::Type::Boolean name: 'reconciling' output: true description: | - If true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. - - !ruby/object:Api::Type::String - name: 'etag' - description: | - A hash of the resource. - output: true + Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. + This can happen due to user-triggered updates or system actions like failover or maintenance. - !ruby/object:Api::Type::NestedObject name: 'encryptionConfig' description: | @@ -136,26 +183,62 @@ properties: properties: - !ruby/object:Api::Type::String name: 'kmsKeyName' + immutable: true description: | The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. - immutable: true - !ruby/object:Api::Type::NestedObject name: 'encryptionInfo' + output: true description: | EncryptionInfo describes the encryption information of a cluster or a backup. - output: true properties: - !ruby/object:Api::Type::Enum name: 'encryptionType' + output: true description: 'Output only. Type of encryption.' 
values: - :TYPE_UNSPECIFIED - :GOOGLE_DEFAULT_ENCRYPTION - :CUSTOMER_MANAGED_ENCRYPTION - output: true - !ruby/object:Api::Type::Array name: kmsKeyVersions item_type: Api::Type::String + output: true description: | Output only. Cloud KMS key versions that are being used to protect the database or the backup. + - !ruby/object:Api::Type::String + name: 'etag' + output: true + description: 'For Resource freshness validation (https://google.aip.dev/154)' + - !ruby/object:Api::Type::KeyValueAnnotations + name: 'annotations' + description: | + Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels. https://google.aip.dev/128 + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + - !ruby/object:Api::Type::String + name: 'sizeBytes' + output: true + description: 'Output only. The size of the backup in bytes.' + - !ruby/object:Api::Type::String + name: 'expiryTime' + output: true + description: | + Output only. The time at which after the backup is eligible to be garbage collected. + It is the duration specified by the backup's retention policy, added to the backup's createTime. + - !ruby/object:Api::Type::NestedObject + name: 'expiryQuantity' + output: true + description: | + Output only. The QuantityBasedExpiry of the backup, specified by the backup's retention policy. + Once the expiry quantity is over retention, the backup is eligible to be garbage collected. + properties: + - !ruby/object:Api::Type::Integer + name: 'retentionCount' + output: true + description: | + Output only. The backup's position among its backups with the same source cluster and type, by descending chronological order create time (i.e. newest first). + - !ruby/object:Api::Type::Integer + name: 'totalRetentionCount' output: true + description: | + Output only. The length of the quantity-based queue, specified by the backup's retention policy. 
diff --git a/mmv1/products/alloydb/Cluster.yaml b/mmv1/products/alloydb/Cluster.yaml index cf597bad4d43..63c7a537893f 100644 --- a/mmv1/products/alloydb/Cluster.yaml +++ b/mmv1/products/alloydb/Cluster.yaml @@ -29,9 +29,9 @@ async: !ruby/object:Api::OpAsync base_url: '{{op_id}}' wait_ms: 1000 timeouts: !ruby/object:Api::Timeouts - insert_minutes: 10 - update_minutes: 10 - delete_minutes: 10 + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 result: !ruby/object:Api::OpAsync::Result path: 'response' status: !ruby/object:Api::OpAsync::Status @@ -78,8 +78,29 @@ examples: ignore_read_extra: - 'reconciling' - 'update_time' + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_secondary_cluster_basic' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_secondary_cluster_basic_test' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + network_name: 'alloydb-network' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + skip_docs: true custom_code: !ruby/object:Provider::Terraform::CustomCode - pre_create: templates/terraform/pre_create/alloydb_restore_cluster.go.erb + pre_create: templates/terraform/pre_create/alloydb_cluster.go.erb + pre_update: templates/terraform/pre_update/alloydb_cluster.go.erb + pre_delete: templates/terraform/pre_delete/alloydb_cluster.go.erb parameters: - !ruby/object:Api::Type::String name: 'clusterId' @@ -106,7 +127,7 @@ properties: output: true description: | The system-generated UID of the resource. 
- - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'User-defined labels for the alloydb cluster.' - !ruby/object:Api::Type::NestedObject @@ -179,16 +200,60 @@ properties: output: true - !ruby/object:Api::Type::String name: 'network' - required: true + exactly_one_of: + - network + - network_config.0.network + default_from_api: true + deprecation_message: >- + `network` is deprecated and will be removed in a future major release. Instead, use `network_config` to define the network configuration. description: | The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}". diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - !ruby/object:Api::Type::NestedObject + name: 'networkConfig' + description: | + Metadata related to network configuration. + default_from_api: true + properties: + - !ruby/object:Api::Type::String + name: network + exactly_one_of: + - network + - network_config.0.network + description: | + The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. + It is specified in the form: "projects/{projectNumber}/global/networks/{network_id}". + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - !ruby/object:Api::Type::String + name: allocatedIpRange + description: | + The name of the allocated IP range for the private IP AlloyDB cluster. For example: "google-managed-services-default". + If set, the instance IPs for this cluster will be created in the allocated range. - !ruby/object:Api::Type::String name: 'displayName' description: | User-settable and human-readable display name for the Cluster. 
+ - !ruby/object:Api::Type::String + name: 'etag' + description: 'For Resource freshness validation (https://google.aip.dev/154)' + - !ruby/object:Api::Type::Boolean + name: 'reconciling' + output: true + description: | + Output only. Reconciling (https://google.aip.dev/128#reconciliation). + Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. + This can happen due to user-triggered updates or system actions like failover or maintenance. + - !ruby/object:Api::Type::String + name: 'state' + output: true + description: 'Output only. The current serving state of the cluster.' + - !ruby/object:Api::Type::KeyValueAnnotations + name: 'annotations' + description: | + Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels. https://google.aip.dev/128 + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - !ruby/object:Api::Type::String name: 'databaseVersion' output: true @@ -318,13 +383,11 @@ properties: - !ruby/object:Api::Type::Array name: 'daysOfWeek' min_size: 1 - description: | - The days of the week to perform a backup. At least one day of the week must be provided. + description: 'The days of the week to perform a backup. At least one day of the week must be provided.' item_type: !ruby/object:Api::Type::Enum name: 'daysOfWeek' required: true - description: | - The days of the week to perform a backup. At least one day of the week must be provided. + description: 'The days of the week to perform a backup. At least one day of the week must be provided.' values: - :MONDAY - :TUESDAY @@ -410,3 +473,33 @@ properties: - !ruby/object:Api::Type::String name: 'sourceType' description: 'Type of migration source.' + - !ruby/object:Api::Type::Enum + name: clusterType + values: + - :PRIMARY + - :SECONDARY + default_value: :PRIMARY + description: | + The type of cluster. 
If not set, defaults to PRIMARY. + - !ruby/object:Api::Type::NestedObject + name: "secondaryConfig" + description: | + Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. + properties: + - !ruby/object:Api::Type::String + name: "primaryClusterName" + required: true + description: | + Name of the primary cluster must be in the format + 'projects/{project}/locations/{location}/clusters/{cluster_id}' +virtual_fields: + - !ruby/object:Api::Type::Enum + name: 'deletion_policy' + description: | + Policy to determine if the cluster should be deleted forcefully. + Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. + Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + values: + - :DEFAULT + - :FORCE + default_value: :DEFAULT diff --git a/mmv1/products/alloydb/Instance.yaml b/mmv1/products/alloydb/Instance.yaml index a8929b3965c7..9f71e922bb9e 100644 --- a/mmv1/products/alloydb/Instance.yaml +++ b/mmv1/products/alloydb/Instance.yaml @@ -13,6 +13,11 @@ --- !ruby/object:Api::Resource name: 'Instance' +docs: !ruby/object:Provider::Terraform::Docs + warning: | + Deleting an instance with instanceType = SECONDARY does not delete the secondary instance, and abandons it instead. + Use deletion_policy = "FORCE" in the associated secondary cluster and delete the cluster forcefully to delete the secondary cluster as well its associated secondary instance. + Users can undo the delete secondary instance action by importing the deleted secondary instance by calling terraform import. 
self_link: '{{cluster}}/instances/{{instance_id}}' base_url: '{{cluster}}/instances?instanceId={{instance_id}}' update_verb: :PATCH @@ -28,9 +33,9 @@ async: !ruby/object:Api::OpAsync base_url: '{{op_id}}' wait_ms: 1000 timeouts: !ruby/object:Api::Timeouts - insert_minutes: 40 - update_minutes: 40 - delete_minutes: 40 + insert_minutes: 120 + update_minutes: 120 + delete_minutes: 120 result: !ruby/object:Api::OpAsync::Result path: 'response' status: !ruby/object:Api::OpAsync::Status @@ -49,6 +54,8 @@ skip_sweeper: true autogen_async: true custom_code: !ruby/object:Provider::Terraform::CustomCode custom_import: templates/terraform/custom_import/alloydb_instance.go.erb + pre_create: templates/terraform/pre_create/alloydb_instance.go.erb + pre_delete: templates/terraform/pre_delete/alloydb_instance.go.erb examples: - !ruby/object:Provider::Terraform::Examples name: 'alloydb_instance_basic' @@ -57,11 +64,51 @@ examples: alloydb_cluster_name: 'alloydb-cluster' alloydb_instance_name: 'alloydb-instance' network_name: 'alloydb-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_secondary_instance_basic' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + alloydb_secondary_instance_name: 'alloydb-secondary-instance' + network_name: 'alloydb-secondary-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_instance_basic_test' + primary_resource_id: 'default' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + 
ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_docs: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_secondary_instance_basic_test' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + alloydb_secondary_instance_name: 'alloydb-secondary-instance' + network_name: 'alloydb-secondary-network' test_vars_overrides: - network_name: 'acctest.BootstrapSharedTestNetwork(t, "alloydb-instance-basic")' + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' ignore_read_extra: - 'reconciling' - 'update_time' + skip_docs: true parameters: - !ruby/object:Api::Type::ResourceRef name: 'cluster' @@ -101,10 +148,10 @@ properties: output: true description: | The system-generated UID of the resource. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'User-defined labels for the alloydb instance.' - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueAnnotations name: 'annotations' description: 'Annotations to allow client tools to store small amount of arbitrary @@ -155,15 +202,39 @@ properties: required: true immutable: true description: | - The type of the instance. If the instance type is READ_POOL, provide the associated PRIMARY instance in the `depends_on` meta-data attribute. + The type of the instance. + If the instance type is READ_POOL, provide the associated PRIMARY/SECONDARY instance in the `depends_on` meta-data attribute. + If the instance type is SECONDARY, point to the cluster_type of the associated secondary cluster instead of mentioning SECONDARY. 
+ Example: {instance_type = google_alloydb_cluster..cluster_type} instead of {instance_type = SECONDARY} + If the instance type is SECONDARY, the terraform delete instance operation does not delete the secondary instance but abandons it instead. + Use deletion_policy = "FORCE" in the associated secondary cluster and delete the cluster forcefully to delete the secondary cluster as well as its associated secondary instance. + Users can undo the delete secondary instance action by importing the deleted secondary instance by calling terraform import. values: - :PRIMARY - :READ_POOL + - :SECONDARY - !ruby/object:Api::Type::String name: 'ipAddress' output: true description: | The IP address for the Instance. This is the connection endpoint for an end-user application. + - !ruby/object:Api::Type::NestedObject + name: 'queryInsightsConfig' + default_from_api: true + description: 'Configuration for query insights.' + properties: + - !ruby/object:Api::Type::Integer + name: 'queryStringLength' + description: 'Query string length. The default value is 1024. Any integer between 256 and 4500 is considered valid.' + - !ruby/object:Api::Type::Boolean + name: 'recordApplicationTags' + description: 'Record application tags for an instance. This flag is turned "on" by default.' + - !ruby/object:Api::Type::Boolean + name: 'recordClientAddress' + description: 'Record client address for an instance. Client address is PII information. This flag is turned "on" by default.' + - !ruby/object:Api::Type::Integer + name: 'queryPlansPerMinute' + description: 'Number of query execution plans captured by Insights per minute for all queries combined. The default value is 5. Any integer between 0 and 20 is considered valid.' - !ruby/object:Api::Type::NestedObject name: 'readPoolConfig' description: 'Read pool specific config. If the instance type is READ_POOL, this configuration must be provided.' @@ -183,3 +254,26 @@ properties: description: "The number of CPU's in the VM instance."
default_from_api: true + - !ruby/object:Api::Type::NestedObject + name: 'clientConnectionConfig' + default_from_api: true + description: | + Client connection specific configurations. + properties: + - !ruby/object:Api::Type::Boolean + name: requireConnectors + description: | + Configuration to enforce connectors only (ex: AuthProxy) connections to the database. + - !ruby/object:Api::Type::NestedObject + name: 'sslConfig' + default_from_api: true + description: | + SSL config option for this instance. + properties: + - !ruby/object:Api::Type::Enum + name: 'sslMode' + default_from_api: true + description: 'SSL mode. Specifies client-server SSL/TLS connection behavior.' + values: + - :ENCRYPTED_ONLY + - :ALLOW_UNENCRYPTED_AND_ENCRYPTED diff --git a/mmv1/products/alloydb/User.yaml b/mmv1/products/alloydb/User.yaml new file mode 100644 index 000000000000..1f3ad0f76698 --- /dev/null +++ b/mmv1/products/alloydb/User.yaml @@ -0,0 +1,124 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'User' +self_link: '{{cluster}}/users/{{user_id}}' +base_url: '{{cluster}}/users' +create_url: '{{cluster}}/users?userId={{user_id}}' +update_url: '{{cluster}}/users?userId={{user_id}}' +update_verb: :POST +description: 'A database user in an AlloyDB cluster.' 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'AlloyDB': 'https://cloud.google.com/alloydb/docs/' + api: 'https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.clusters.users/create' +import_format: ['projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/users/{{user_id}}'] +skip_sweeper: true +autogen_async: true +custom_code: !ruby/object:Provider::Terraform::CustomCode + custom_import: templates/terraform/custom_import/alloydb_user.go.erb +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_user_builtin' + primary_resource_id: 'user1' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_cluster_pass: 'cluster_secret' + alloydb_instance_name: 'alloydb-instance' + alloydb_user_name: 'user1' + alloydb_user_pass: 'user_secret' + network_name: 'alloydb-network' + ignore_read_extra: + - 'password' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_user_builtin_test' + primary_resource_id: 'user1' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_cluster_pass: 'cluster_secret' + alloydb_instance_name: 'alloydb-instance' + alloydb_user_name: 'user1' + alloydb_user_pass: 'user_secret' + network_name: 'alloydb-network' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + ignore_read_extra: + - 'password' + skip_docs: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_user_iam' + primary_resource_id: 'user2' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + alloydb_cluster_pass: 'cluster_secret' + alloydb_user_name: 'user2@foo.com' + network_name: 'alloydb-network' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_user_iam_test' + primary_resource_id: 'user2' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + alloydb_cluster_pass: 
'cluster_secret' + alloydb_user_name: 'user2@foo.com' + network_name: 'alloydb-network' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + skip_docs: true +parameters: + - !ruby/object:Api::Type::ResourceRef + name: 'cluster' + description: | + Identifies the alloydb cluster. Must be in the format + 'projects/{project}/locations/{location}/clusters/{cluster_id}' + required: true + immutable: true + resource: 'Cluster' + imports: 'name' + url_param_only: true + - !ruby/object:Api::Type::String + name: 'userId' + required: true + immutable: true + url_param_only: true + description: | + The database role name of the user. + - !ruby/object:Api::Type::Enum + name: 'userType' + required: true + immutable: true + description: | + The type of this user. + values: + - :ALLOYDB_BUILT_IN + - :ALLOYDB_IAM_USER +properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + description: | + Name of the resource in the form of projects/{project}/locations/{location}/clusters/{cluster}/users/{user}. + - !ruby/object:Api::Type::String + name: 'password' + ignore_read: true + description: | + Password for this database user. + - !ruby/object:Api::Type::Array + name: 'databaseRoles' + item_type: Api::Type::String + description: | + List of database roles this database user has. diff --git a/mmv1/products/apigateway/ApiConfig.yaml b/mmv1/products/apigateway/ApiConfig.yaml index 2c795186cbed..da35b85703d9 100644 --- a/mmv1/products/apigateway/ApiConfig.yaml +++ b/mmv1/products/apigateway/ApiConfig.yaml @@ -120,7 +120,7 @@ properties: output: true description: | The ID of the associated Service Config (https://cloud.google.com/service-infrastructure/docs/glossary#config). - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Resource labels to represent user-provided metadata. 
diff --git a/mmv1/products/apigateway/ApiResource.yaml b/mmv1/products/apigateway/ApiResource.yaml index f295dd636fee..93ae56e5e75b 100644 --- a/mmv1/products/apigateway/ApiResource.yaml +++ b/mmv1/products/apigateway/ApiResource.yaml @@ -87,7 +87,7 @@ properties: name: 'createTime' description: Creation timestamp in RFC3339 text format. output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Resource labels to represent user-provided metadata. diff --git a/mmv1/products/apigateway/Gateway.yaml b/mmv1/products/apigateway/Gateway.yaml index 5ce68c7f3315..9dc7b3b17b02 100644 --- a/mmv1/products/apigateway/Gateway.yaml +++ b/mmv1/products/apigateway/Gateway.yaml @@ -102,7 +102,7 @@ properties: description: The default API Gateway host name of the form {gatewayId}-{hash}.{region_code}.gateway.dev. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Resource labels to represent user-provided metadata. diff --git a/mmv1/products/apigee/Environment.yaml b/mmv1/products/apigee/Environment.yaml index 83ca01541f8f..c9b343dc4a7b 100644 --- a/mmv1/products/apigee/Environment.yaml +++ b/mmv1/products/apigee/Environment.yaml @@ -87,7 +87,7 @@ examples: # Resource creation race skip_vcr: true - !ruby/object:Provider::Terraform::Examples - name: 'apigee_environment_nodeconfig_test' + name: 'apigee_environment_type_test' primary_resource_id: 'apigee_environment' primary_resource_name: "fmt.Sprintf(\"organizations/tf-test%s\", context[\"random_suffix\"\ @@ -184,3 +184,16 @@ properties: The current total number of gateway nodes that each environment currently has across all instances. output: true + - !ruby/object:Api::Type::Enum + name: 'type' + description: | + Types that can be selected for an Environment. Each of the types are + limited by capability and capacity. 
Refer to Apigee's public documentation + to understand each of these types in detail. + An Apigee org can support heterogeneous Environments. + default_from_api: true + values: + - 'ENVIRONMENT_TYPE_UNSPECIFIED' + - 'BASE' + - 'INTERMEDIATE' + - 'COMPREHENSIVE' diff --git a/mmv1/products/apigee/Organization.yaml b/mmv1/products/apigee/Organization.yaml index e6c0d808b8ad..912d95511a39 100644 --- a/mmv1/products/apigee/Organization.yaml +++ b/mmv1/products/apigee/Organization.yaml @@ -60,6 +60,8 @@ examples: test_env_vars: org_id: :ORG_ID billing_account: :BILLING_ACCT + ignore_read_extra: + - properties skip_docs: true # Resource creation race @@ -76,6 +78,8 @@ examples: test_env_vars: org_id: :ORG_ID billing_account: :BILLING_ACCT + ignore_read_extra: + - properties skip_docs: true # Resource creation race diff --git a/mmv1/products/artifactregistry/Repository.yaml b/mmv1/products/artifactregistry/Repository.yaml index 4a7e32d73a1c..8eda7c11a07f 100644 --- a/mmv1/products/artifactregistry/Repository.yaml +++ b/mmv1/products/artifactregistry/Repository.yaml @@ -82,6 +82,18 @@ examples: vars: repository_id: 'my-repository' description: 'example remote docker repository' + - !ruby/object:Provider::Terraform::Examples + name: 'artifact_registry_repository_remote_apt' + primary_resource_id: 'my-repo' + vars: + repository_id: 'debian-buster' + description: 'example remote apt repository' + - !ruby/object:Provider::Terraform::Examples + name: 'artifact_registry_repository_remote_yum' + primary_resource_id: 'my-repo' + vars: + repository_id: 'centos-8' + description: 'example remote yum repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_cleanup' min_version: beta @@ -131,7 +143,7 @@ properties: name: description description: |- The user-provided description of the repository. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Labels with user-defined metadata.
@@ -328,13 +340,49 @@ properties: description: |- The description of the remote source. immutable: true + - !ruby/object:Api::Type::NestedObject + name: 'aptRepository' + exactly_one_of: + - remoteRepositoryConfig.0.apt_repository + - remoteRepositoryConfig.0.docker_repository + - remoteRepositoryConfig.0.maven_repository + - remoteRepositoryConfig.0.npm_repository + - remoteRepositoryConfig.0.python_repository + - remoteRepositoryConfig.0.yum_repository + description: |- + Specific settings for an Apt remote repository. + immutable: true + properties: + - !ruby/object:Api::Type::NestedObject + name: 'publicRepository' + description: |- + One of the publicly available Apt repositories supported by Artifact Registry. + immutable: true + properties: + - !ruby/object:Api::Type::Enum + name: 'repositoryBase' + required: true + description: |- + A common public repository base for Apt, e.g. `"debian/dists/buster"` + immutable: true + values: + - :DEBIAN + - :UBUNTU + - !ruby/object:Api::Type::String + name: 'repositoryPath' + required: true + description: |- + Specific repository from the base. + immutable: true - !ruby/object:Api::Type::NestedObject name: 'dockerRepository' exactly_one_of: + - remoteRepositoryConfig.0.apt_repository - remoteRepositoryConfig.0.docker_repository - remoteRepositoryConfig.0.maven_repository - remoteRepositoryConfig.0.npm_repository - remoteRepositoryConfig.0.python_repository + - remoteRepositoryConfig.0.yum_repository description: |- Specific settings for a Docker remote repository. immutable: true @@ -352,10 +400,12 @@ properties: - !ruby/object:Api::Type::NestedObject name: 'mavenRepository' exactly_one_of: + - remoteRepositoryConfig.0.apt_repository - remoteRepositoryConfig.0.docker_repository - remoteRepositoryConfig.0.maven_repository - remoteRepositoryConfig.0.npm_repository - remoteRepositoryConfig.0.python_repository + - remoteRepositoryConfig.0.yum_repository description: |- Specific settings for a Maven remote repository. 
immutable: true @@ -373,10 +423,12 @@ properties: - !ruby/object:Api::Type::NestedObject name: 'npmRepository' exactly_one_of: + - remoteRepositoryConfig.0.apt_repository - remoteRepositoryConfig.0.docker_repository - remoteRepositoryConfig.0.maven_repository - remoteRepositoryConfig.0.npm_repository - remoteRepositoryConfig.0.python_repository + - remoteRepositoryConfig.0.yum_repository description: |- Specific settings for an Npm remote repository. immutable: true @@ -394,10 +446,12 @@ properties: - !ruby/object:Api::Type::NestedObject name: 'pythonRepository' exactly_one_of: + - remoteRepositoryConfig.0.apt_repository - remoteRepositoryConfig.0.docker_repository - remoteRepositoryConfig.0.maven_repository - remoteRepositoryConfig.0.npm_repository - remoteRepositoryConfig.0.python_repository + - remoteRepositoryConfig.0.yum_repository description: |- Specific settings for a Python remote repository. immutable: true @@ -412,6 +466,45 @@ properties: values: - :PYPI default_value: :PYPI + - !ruby/object:Api::Type::NestedObject + name: 'yumRepository' + exactly_one_of: + - remoteRepositoryConfig.0.apt_repository + - remoteRepositoryConfig.0.docker_repository + - remoteRepositoryConfig.0.maven_repository + - remoteRepositoryConfig.0.npm_repository + - remoteRepositoryConfig.0.python_repository + - remoteRepositoryConfig.0.yum_repository + description: |- + Specific settings for a Yum remote repository. + immutable: true + properties: + - !ruby/object:Api::Type::NestedObject + name: 'publicRepository' + description: |- + One of the publicly available Yum repositories supported by Artifact Registry. + immutable: true + properties: + - !ruby/object:Api::Type::Enum + name: 'repositoryBase' + required: true + description: |- + A common public repository base for Yum.
+ immutable: true + values: + - :CENTOS + - :CENTOS_DEBUG + - :CENTOS_VAULT + - :CENTOS_STREAM + - :ROCKY + - :EPEL + - !ruby/object:Api::Type::String + name: 'repositoryPath' + required: true + description: |- + Specific repository from the base, e.g. `"centos/8-stream/BaseOS/x86_64/os"` + immutable: true + - !ruby/object:Api::Type::Boolean name: 'cleanupPolicyDryRun' min_version: beta diff --git a/mmv1/products/artifactregistry/VPCSCConfig.yaml b/mmv1/products/artifactregistry/VPCSCConfig.yaml new file mode 100644 index 000000000000..6751669c1bb3 --- /dev/null +++ b/mmv1/products/artifactregistry/VPCSCConfig.yaml @@ -0,0 +1,70 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'VPCSCConfig' +description: |- + The Artifact Registry VPC SC config that applies to a Project. +references: !ruby/object:Api::Resource::ReferenceLinks + api: 'https://cloud.google.com/artifact-registry/docs/reference/rest/v1/VPCSCConfig' +min_version: beta +docs: !ruby/object:Provider::Terraform::Docs + note: |- + VPC SC configs are automatically created for a given location. Creating a + resource of this type will acquire and update the resource that already + exists at the location. Deleting this resource will remove the config from + your Terraform state but leave the resource as is. 
+base_url: 'projects/{{project}}/locations/{{location}}/vpcscConfig' +self_link: 'projects/{{project}}/locations/{{location}}/vpcscConfig' +create_url: 'projects/{{project}}/locations/{{location}}/vpcscConfig' +create_verb: :PATCH +update_verb: :PATCH +skip_delete: true +examples: + - !ruby/object:Provider::Terraform::Examples + # Requires VPC SC Policy configured on organization + skip_test: true + name: 'artifact_registry_vpcsc_config' + primary_resource_id: 'my-config' +autogen_async: false +async: !ruby/object:Api::OpAsync + actions: [] + # necessary to compile + operation: !ruby/object:Api::OpAsync::Operation + base_url: '{{op_id}}' +custom_code: !ruby/object:Provider::Terraform::CustomCode + encoder: templates/terraform/encoders/location_from_region.go.erb +parameters: + - !ruby/object:Api::Type::String + name: location + required: false + immutable: true + url_param_only: true + default_from_api: true + description: | + The name of the location this config is located in. + - !ruby/object:Api::Type::String + name: name + output: true + description: |- + The name of the project's VPC SC Config. + Always of the form: projects/{project}/location/{location}/vpcscConfig +properties: + - !ruby/object:Api::Type::Enum + name: vpcscPolicy + min_version: beta + description: |- + The VPC SC policy for project and location. 
+ values: + - :DENY + - :ALLOW diff --git a/mmv1/products/backupdr/ManagementServer.yaml b/mmv1/products/backupdr/ManagementServer.yaml index 74457a21310e..fc70378dae56 100644 --- a/mmv1/products/backupdr/ManagementServer.yaml +++ b/mmv1/products/backupdr/ManagementServer.yaml @@ -39,6 +39,19 @@ examples: managementserver_name: 'ms-console' test_env_vars: project: :PROJECT_NAME + skip_test: true + - !ruby/object:Provider::Terraform::Examples + min_version: beta + name: 'backup_dr_management_server_test' + primary_resource_id: 'ms-console' + vars: + network_name: 'vpc-network' + managementserver_name: 'ms-console' + test_env_vars: + project: :PROJECT_NAME + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-network-1")' + skip_docs: true parameters: - !ruby/object:Api::Type::String name: 'location' diff --git a/mmv1/products/beyondcorp/AppConnection.yaml b/mmv1/products/beyondcorp/AppConnection.yaml index 215bac419709..1ebf2c7368fb 100644 --- a/mmv1/products/beyondcorp/AppConnection.yaml +++ b/mmv1/products/beyondcorp/AppConnection.yaml @@ -88,7 +88,7 @@ properties: name: 'displayName' description: | An arbitrary user-provided name for the AppConnection. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Resource labels to represent user provided metadata. diff --git a/mmv1/products/beyondcorp/AppConnector.yaml b/mmv1/products/beyondcorp/AppConnector.yaml index 20a6b3a11bcc..fb8e23bedbf6 100644 --- a/mmv1/products/beyondcorp/AppConnector.yaml +++ b/mmv1/products/beyondcorp/AppConnector.yaml @@ -82,7 +82,7 @@ properties: name: 'displayName' description: | An arbitrary user-provided name for the AppConnector. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Resource labels to represent user provided metadata. 
diff --git a/mmv1/products/beyondcorp/AppGateway.yaml b/mmv1/products/beyondcorp/AppGateway.yaml index 1eda5d9eeefe..7d6fc2fc42fe 100644 --- a/mmv1/products/beyondcorp/AppGateway.yaml +++ b/mmv1/products/beyondcorp/AppGateway.yaml @@ -98,7 +98,7 @@ properties: name: 'displayName' description: | An arbitrary user-provided name for the AppGateway. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Resource labels to represent user provided metadata. diff --git a/mmv1/products/bigquery/Dataset.yaml b/mmv1/products/bigquery/Dataset.yaml index 7cd95a4eb7f1..4e8540977f17 100644 --- a/mmv1/products/bigquery/Dataset.yaml +++ b/mmv1/products/bigquery/Dataset.yaml @@ -140,6 +140,11 @@ properties: * `allAuthenticatedUsers`: All authenticated BigQuery users. + - !ruby/object:Api::Type::String + name: 'iamMember' + description: | + Some other type of member that appears in the IAM Policy but isn't a user, + group, domain, or special group. For example: `allUsers` - !ruby/object:Api::Type::String name: 'userByEmail' description: | @@ -290,12 +295,11 @@ properties: name: 'friendlyName' description: A descriptive name for the dataset send_empty_value: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | The labels associated with this dataset. You can use these to - organize and group your datasets - default_from_api: true + organize and group your datasets. - !ruby/object:Api::Type::Integer name: 'lastModifiedTime' description: | diff --git a/mmv1/products/bigquery/Job.yaml b/mmv1/products/bigquery/Job.yaml index cf12ae9fdf3d..016f76d03a72 100644 --- a/mmv1/products/bigquery/Job.yaml +++ b/mmv1/products/bigquery/Job.yaml @@ -168,7 +168,7 @@ properties: name: 'jobTimeoutMs' description: | Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job. 
- - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | The labels associated with this job. You can use these to organize and group your jobs. diff --git a/mmv1/products/bigquery/Routine.yaml b/mmv1/products/bigquery/Routine.yaml index 054100c13ee9..7e57fd7dd1fb 100644 --- a/mmv1/products/bigquery/Routine.yaml +++ b/mmv1/products/bigquery/Routine.yaml @@ -76,6 +76,7 @@ properties: - !ruby/object:Api::Type::Enum name: 'routineType' immutable: true + required: true description: The type of routine. values: - :SCALAR_FUNCTION diff --git a/mmv1/products/bigquery/Table.yaml b/mmv1/products/bigquery/Table.yaml index ae9c5bb81666..6913bf4f2dbc 100644 --- a/mmv1/products/bigquery/Table.yaml +++ b/mmv1/products/bigquery/Table.yaml @@ -88,7 +88,7 @@ properties: name: 'id' description: 'An opaque ID uniquely identifying the table.' output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | The labels associated with this dataset. You can use these to @@ -175,7 +175,10 @@ properties: name: 'expirationMs' description: | Number of milliseconds for which to keep the storage for a - partition. + partition. If unspecified when the table is created in a dataset + that has `defaultPartitionExpirationMs`, it will inherit + the value of `defaultPartitionExpirationMs` from the dataset. + To specify an unlimited expiration, set the value to 0.
- !ruby/object:Api::Type::String name: 'field' description: | diff --git a/mmv1/products/bigqueryconnection/Connection.yaml b/mmv1/products/bigqueryconnection/Connection.yaml index dd483bbcb432..253e6299b96e 100644 --- a/mmv1/products/bigqueryconnection/Connection.yaml +++ b/mmv1/products/bigqueryconnection/Connection.yaml @@ -111,10 +111,10 @@ examples: vars: connection_id: 'my-connection' database: 'projects/project/instances/instance/databases/database' + database_role: 'database_role' - !ruby/object:Provider::Terraform::Examples - name: 'bigquery_connection_cloudspanner_analytics' + name: 'bigquery_connection_cloudspanner_databoost' pull_external: true - skip_docs: true primary_resource_id: 'connection' vars: connection_id: 'my-connection' @@ -299,18 +299,48 @@ properties: - !ruby/object:Api::Type::String name: 'database' description: - Cloud Spanner database in the form `project/instance/database' + Cloud Spanner database in the form `project/instance/database'. required: true - !ruby/object:Api::Type::Boolean name: 'useParallelism' description: - If parallelism should be used when reading from Cloud Spanner + If parallelism should be used when reading from Cloud Spanner. + - !ruby/object:Api::Type::Integer + name: 'maxParallelism' + description: + Allows setting max parallelism per query when executing on Spanner independent compute + resources. If unspecified, default values of parallelism are chosen that are dependent on + the Cloud Spanner instance configuration. `useParallelism` and `useDataBoost` must be set + when setting max parallelism. + required_with: + - cloudSpanner.0.useDataBoost + - cloudSpanner.0.useParallelism + - !ruby/object:Api::Type::Boolean + name: 'useDataBoost' + description: + If set, the request will be executed via Spanner independent compute resources. + `use_parallelism` must be set when using data boost. 
+ required_with: + - cloudSpanner.0.useParallelism + - !ruby/object:Api::Type::String + name: 'databaseRole' + description: + Cloud Spanner database role for fine-grained access control. The Cloud Spanner admin + should have provisioned the database role with appropriate permissions, such as `SELECT` + and `INSERT`. Other users should only use roles provided by their Cloud Spanner admins. + The database role name must start with a letter, and can only contain letters, numbers, + and underscores. For more details, see https://cloud.google.com/spanner/docs/fgac-about. + validation: !ruby/object:Provider::Terraform::Validation + regex: '^[a-zA-Z][a-zA-Z0-9_]*$' - !ruby/object:Api::Type::Boolean name: 'useServerlessAnalytics' description: If the serverless analytics service should be used to read data from - Cloud Spanner. useParallelism must be set when using serverless - analytics + Cloud Spanner. `useParallelism` must be set when using serverless + analytics. + deprecation_message: >- + `useServerlessAnalytics` is deprecated and will be removed in a future major release. Use + `useDataBoost` instead. - !ruby/object:Api::Type::NestedObject name: cloudResource description: diff --git a/mmv1/products/bigquerydatatransfer/Config.yaml b/mmv1/products/bigquerydatatransfer/Config.yaml index aa6c92c8e02c..0a6e540b46c8 100644 --- a/mmv1/products/bigquerydatatransfer/Config.yaml +++ b/mmv1/products/bigquerydatatransfer/Config.yaml @@ -15,7 +15,7 @@ name: 'Config' base_url: projects/{{project}}/locations/{{location}}/transferConfigs?serviceAccountName={{service_account_name}} self_link: '{{name}}' -update_url: "{{name}}?serviceAccountName={{service_account_name}}&updateMask=serviceAccountName,displayName,destinationDatasetId,schedule,scheduleOptions,emailPreferences,notificationPubsubTopic,dataRefreshWindowDays,disabled,params" +update_url: "{{name}}?serviceAccountName={{service_account_name}}" update_verb: :PATCH description: | Represents a data transfer configuration. 
A transfer configuration @@ -33,6 +33,7 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/bigquery_data_transfer.go.erb custom_import: templates/terraform/custom_import/bigquery_data_transfer_self_link_as_name_set_location.go.erb post_create: templates/terraform/post_create/set_computed_name.erb + pre_update: templates/terraform/pre_update/bigquerydatatransfer_config.erb custom_diff: [ 'sensitiveParamCustomizeDiff', 'paramsCustomizeDiff', diff --git a/mmv1/products/bigqueryreservation/CapacityCommitment.yaml b/mmv1/products/bigqueryreservation/CapacityCommitment.yaml index bd51ff262e37..cc66888d7749 100644 --- a/mmv1/products/bigqueryreservation/CapacityCommitment.yaml +++ b/mmv1/products/bigqueryreservation/CapacityCommitment.yaml @@ -15,7 +15,7 @@ name: 'CapacityCommitment' base_url: projects/{{project}}/locations/{{location}}/capacityCommitments create_url: projects/{{project}}/locations/{{location}}/capacityCommitments?capacityCommitmentId={{capacity_commitment_id}} -self_link: 'projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}' +self_link: '{{name}}' update_verb: :PATCH update_mask: true description: | @@ -26,19 +26,25 @@ references: !ruby/object:Api::Resource::ReferenceLinks guides: 'Introduction to Reservations': 'https://cloud.google.com/bigquery/docs/reservations-intro' api: 'https://cloud.google.com/bigquery/docs/reference/reservations/rest/v1/projects.locations.capacityCommitments' -id_format: 'projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}' +id_format: '{{name}}' import_format: [ 'projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}', ] custom_code: !ruby/object:Provider::Terraform::CustomCode constants: templates/terraform/constants/bigquery_reservation_capacity_commitment.go.erb + custom_import: templates/terraform/custom_import/bigquery_reservation_capacity_commitment_set_id.go.erb 
examples: - !ruby/object:Provider::Terraform::Examples name: 'bigquery_reservation_capacity_commitment_basic' pull_external: true skip_docs: true primary_resource_id: 'commitment' + - !ruby/object:Provider::Terraform::Examples + name: 'bigquery_reservation_capacity_commitment_no_id' + pull_external: true + skip_docs: true + primary_resource_id: 'commitment' - !ruby/object:Provider::Terraform::Examples name: 'bigquery_reservation_capacity_commitment_docs' skip_test: true @@ -102,7 +108,7 @@ properties: - !ruby/object:Api::Type::String name: 'renewalPlan' description: | - The plan this capacity commitment is converted to after commitmentEndTime passes. Once the plan is changed, committed period is extended according to commitment plan. Only applicable some commitment plans. + The plan this capacity commitment is converted to after commitmentEndTime passes. Once the plan is changed, committed period is extended according to commitment plan. Only applicable for some commitment plans. - !ruby/object:Api::Type::String name: 'edition' immutable: true diff --git a/mmv1/products/bigtable/AppProfile.yaml b/mmv1/products/bigtable/AppProfile.yaml index 4100ae507fa8..cfe4f4e10512 100644 --- a/mmv1/products/bigtable/AppProfile.yaml +++ b/mmv1/products/bigtable/AppProfile.yaml @@ -43,6 +43,8 @@ examples: deletion_protection: 'false' ignore_read_extra: - 'ignore_warnings' + # bigtable instance does not use the shared HTTP client, this test creates an instance + skip_vcr: true - !ruby/object:Provider::Terraform::Examples name: 'bigtable_app_profile_singlecluster' primary_resource_id: 'ap' @@ -56,6 +58,8 @@ examples: deletion_protection: 'false' ignore_read_extra: - 'ignore_warnings' + # bigtable instance does not use the shared HTTP client, this test creates an instance + skip_vcr: true - !ruby/object:Provider::Terraform::Examples name: 'bigtable_app_profile_multicluster' primary_resource_id: 'ap' @@ -69,6 +73,23 @@ examples: deletion_protection: 'false' ignore_read_extra: - 
'ignore_warnings' + # bigtable instance does not use the shared HTTP client, this test creates an instance + skip_vcr: true + - !ruby/object:Provider::Terraform::Examples + name: 'bigtable_app_profile_priority' + primary_resource_id: 'ap' + vars: + instance_name: 'bt-instance' + app_profile_name: 'bt-profile' + deletion_protection: 'true' + test_vars_overrides: + deletion_protection: 'false' + oics_vars_overrides: + deletion_protection: 'false' + ignore_read_extra: + - 'ignore_warnings' + # bigtable instance does not use the shared HTTP client, this test creates an instance + skip_vcr: true custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/bigtable_app_profile.go.erb extra_schema_entry: templates/terraform/extra_schema_entry/bigtable_app_profile.go.erb @@ -135,3 +156,18 @@ properties: description: | If true, CheckAndMutateRow and ReadModifyWriteRow requests are allowed by this app profile. It is unsafe to send these requests to the same table/row/column in multiple clusters. + - !ruby/object:Api::Type::NestedObject + name: 'standardIsolation' + default_from_api: true + description: | + The standard options used for isolating this app profile's traffic from other use cases. + properties: + - !ruby/object:Api::Type::Enum + name: 'priority' + required: true + description: | + The priority of requests sent using this app profile. 
+ values: + - :PRIORITY_LOW + - :PRIORITY_MEDIUM + - :PRIORITY_HIGH diff --git a/mmv1/products/billing/ProjectInfo.yaml b/mmv1/products/billing/ProjectInfo.yaml index b55634709d98..e941f6aaaee7 100644 --- a/mmv1/products/billing/ProjectInfo.yaml +++ b/mmv1/products/billing/ProjectInfo.yaml @@ -28,11 +28,12 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/billing_project_info.go.erb test_check_destroy: templates/terraform/custom_check_destroy/billing_project_info.go.erb import_format: - ['projects/{{project}}', '{{project}}'] + ['projects/{{%project}}', '{{%project}}'] examples: - !ruby/object:Provider::Terraform::Examples name: 'billing_project_info_basic' primary_resource_id: 'default' + skip_import_test: true test_env_vars: billing_account: :BILLING_ACCT org_id: :ORG_ID diff --git a/mmv1/products/certificatemanager/Certificate.yaml b/mmv1/products/certificatemanager/Certificate.yaml index 552364c08bba..d6cc703156cb 100644 --- a/mmv1/products/certificatemanager/Certificate.yaml +++ b/mmv1/products/certificatemanager/Certificate.yaml @@ -71,6 +71,23 @@ examples: primary_resource_id: 'default' vars: cert_name: 'self-managed-cert' + - !ruby/object:Provider::Terraform::Examples + name: 'certificate_manager_google_managed_certificate_issuance_config_all_regions' + primary_resource_id: 'default' + vars: + cert_name: 'issuance-config-cert' + ca_name: 'ca-authority' + pool_name: 'ca-pool' + issuance_config_name: 'issuance-config' + - !ruby/object:Provider::Terraform::Examples + name: 'certificate_manager_google_managed_certificate_dns_all_regions' + primary_resource_id: 'default' + vars: + dns_auth_name: 'dns-auth' + dns_auth_subdomain: 'subdomain' + dns_auth_name2: 'dns-auth2' + dns_auth_subdomain2: 'subdomain2' + cert_name: 'dns-cert' custom_code: !ruby/object:Provider::Terraform::CustomCode constants: templates/terraform/constants/cert_manager.erb parameters: @@ -95,7 +112,7 @@ properties: name: 'description' 
description: | A human-readable description of the resource. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'Set of label tags associated with the Certificate resource.' - !ruby/object:Api::Type::String @@ -107,11 +124,11 @@ properties: DEFAULT: Certificates with default scope are served from core Google data centers. If unsure, choose this option. - EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, - served from non-core Google data centers. + EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + See https://cloud.google.com/vpc/docs/edge-locations. ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). - see https://cloud.google.com/compute/docs/regions-zones + See https://cloud.google.com/compute/docs/regions-zones default_value: DEFAULT diff_suppress_func: 'certManagerDefaultScopeDiffSuppress' - !ruby/object:Api::Type::NestedObject diff --git a/mmv1/products/certificatemanager/CertificateIssuanceConfig.yaml b/mmv1/products/certificatemanager/CertificateIssuanceConfig.yaml index 6a994704207a..fa4cec9f21ac 100644 --- a/mmv1/products/certificatemanager/CertificateIssuanceConfig.yaml +++ b/mmv1/products/certificatemanager/CertificateIssuanceConfig.yaml @@ -109,7 +109,7 @@ properties: accurate to nanoseconds with up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | 'Set of label tags associated with the CertificateIssuanceConfig resource. 
diff --git a/mmv1/products/certificatemanager/CertificateMap.yaml b/mmv1/products/certificatemanager/CertificateMap.yaml index 1342e8344e46..96df010f7666 100644 --- a/mmv1/products/certificatemanager/CertificateMap.yaml +++ b/mmv1/products/certificatemanager/CertificateMap.yaml @@ -76,11 +76,10 @@ properties: accurate to nanoseconds with up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Set of labels associated with a Certificate Map resource. - default_from_api: true - !ruby/object:Api::Type::Array name: 'gclbTargets' description: | diff --git a/mmv1/products/certificatemanager/CertificateMapEntry.yaml b/mmv1/products/certificatemanager/CertificateMapEntry.yaml index 296e342a6920..aa1ac5d57570 100644 --- a/mmv1/products/certificatemanager/CertificateMapEntry.yaml +++ b/mmv1/products/certificatemanager/CertificateMapEntry.yaml @@ -92,13 +92,12 @@ properties: with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Set of labels associated with a Certificate Map Entry. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - default_from_api: true - !ruby/object:Api::Type::Array name: 'certificates' required: true diff --git a/mmv1/products/certificatemanager/DnsAuthorization.yaml b/mmv1/products/certificatemanager/DnsAuthorization.yaml index fa6e636e7691..bcb50c60c329 100644 --- a/mmv1/products/certificatemanager/DnsAuthorization.yaml +++ b/mmv1/products/certificatemanager/DnsAuthorization.yaml @@ -63,7 +63,7 @@ properties: name: 'description' description: | A human-readable description of the resource. 
- - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'Set of label tags associated with the DNS Authorization resource.' diff --git a/mmv1/products/certificatemanager/TrustConfig.yaml b/mmv1/products/certificatemanager/TrustConfig.yaml index d4ff25e5256c..d7395fa22948 100644 --- a/mmv1/products/certificatemanager/TrustConfig.yaml +++ b/mmv1/products/certificatemanager/TrustConfig.yaml @@ -84,7 +84,7 @@ properties: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'Set of label tags associated with the trust config.' immutable: true diff --git a/mmv1/products/cloudbuild/BitbucketServerConfig.yaml b/mmv1/products/cloudbuild/BitbucketServerConfig.yaml index d8b2f2763209..bd8bdd316f70 100644 --- a/mmv1/products/cloudbuild/BitbucketServerConfig.yaml +++ b/mmv1/products/cloudbuild/BitbucketServerConfig.yaml @@ -64,8 +64,6 @@ examples: config_id: 'bbs-config' network_name: 'vpc-network' global_address_name: 'private-ip-alloc' - test_vars_overrides: - network_name: 'acctest.BootstrapSharedTestNetwork(t, "peered-network")' custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/cloudbuild_bitbucketserver_config.go.erb post_create: templates/terraform/post_create/cloudbuild_bitbucketserver_config.go.erb diff --git a/mmv1/products/cloudfunctions/CloudFunction.yaml b/mmv1/products/cloudfunctions/CloudFunction.yaml index ad310f7293ab..6ac2766e6ffe 100644 --- a/mmv1/products/cloudfunctions/CloudFunction.yaml +++ b/mmv1/products/cloudfunctions/CloudFunction.yaml @@ -129,7 +129,7 @@ properties: description: | The version identifier of the Cloud Function. Each deployment attempt results in a new version of a function being created. 
- - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | A set of key/value label pairs associated with this Cloud Function. diff --git a/mmv1/products/cloudfunctions2/Function.yaml b/mmv1/products/cloudfunctions2/Function.yaml index 69d5e04907b7..5b0a0ff36b0a 100644 --- a/mmv1/products/cloudfunctions2/Function.yaml +++ b/mmv1/products/cloudfunctions2/Function.yaml @@ -251,6 +251,7 @@ parameters: - !ruby/object:Api::Type::String name: 'location' immutable: true + required: true url_param_only: true description: The location of this cloud function. properties: @@ -629,7 +630,7 @@ properties: name: 'updateTime' output: true description: 'The last update timestamp of a Cloud Function.' - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | A set of key/value label pairs associated with this Cloud Function. diff --git a/mmv1/products/cloudidentity/Group.yaml b/mmv1/products/cloudidentity/Group.yaml index a27a53223689..0fe37e0e21aa 100644 --- a/mmv1/products/cloudidentity/Group.yaml +++ b/mmv1/products/cloudidentity/Group.yaml @@ -128,6 +128,37 @@ properties: description: | An extended description to help users determine the purpose of a Group. Must not be longer than 4,096 characters. + - !ruby/object:Api::Type::Array + name: 'additionalGroupKeys' + output: true + description: 'Additional group keys associated with the Group' + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'id' + output: true + description: | + The ID of the entity. + + For Google-managed entities, the id must be the email address of an existing + group or user. + + For external-identity-mapped entities, the id must be a string conforming + to the Identity Source's requirements. + + Must be unique within a namespace. 
+ - !ruby/object:Api::Type::String + name: 'namespace' + output: true + description: | + The namespace in which the entity exists. + + If not specified, the EntityKey represents a Google-managed entity + such as a Google user or a Google Group. + + If specified, the EntityKey represents an external-identity-mapped group. + The namespace must correspond to an identity source created in Admin Console + and must be in the form of `identitysources/{identity_source_id}`. - !ruby/object:Api::Type::String name: 'createTime' output: true diff --git a/mmv1/products/cloudiot/Device.yaml b/mmv1/products/cloudiot/Device.yaml deleted file mode 100644 index 6e893b5c3651..000000000000 --- a/mmv1/products/cloudiot/Device.yaml +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright 2023 Google Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- !ruby/object:Api::Resource -name: 'Device' -base_url: '{{registry}}/devices' -self_link: '{{registry}}/devices/{{name}}' -update_verb: :PATCH -update_mask: true -description: | - A Google Cloud IoT Core device. -references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Official Documentation': 'https://cloud.google.com/iot/docs/' - api: 'https://cloud.google.com/iot/docs/reference/cloudiot/rest/' -import_format: ['{{%registry}}/devices/{{name}}'] -deprecation_message: >- - `google_cloudiot_device` is deprecated in the API. This resource will be removed in the next major release of the provider. 
-examples: - - !ruby/object:Provider::Terraform::Examples - name: 'cloudiot_device_basic' - primary_resource_id: 'test-device' - vars: - cloudiot_device_name: 'cloudiot-device' - cloudiot_device_registry_name: 'cloudiot-device-registry' - test_env_vars: - project: :PROJECT_NAME - region: :REGION - - !ruby/object:Provider::Terraform::Examples - name: 'cloudiot_device_full' - primary_resource_id: 'test-device' - vars: - cloudiot_device_name: 'cloudiot-device' - cloudiot_device_registry_name: 'cloudiot-device-registry' - test_env_vars: - project: :PROJECT_NAME - region: :REGION -parameters: - - !ruby/object:Api::Type::String - name: registry - immutable: true - url_param_only: true - required: true - description: | - The name of the device registry where this device should be created. -properties: - - !ruby/object:Api::Type::String - name: 'name' - immutable: true - required: true - description: | - A unique name for the resource. - api_name: 'id' - - !ruby/object:Api::Type::String - name: 'numId' - output: true - description: | - A server-defined unique numeric ID for the device. - This is a more compact way to identify devices, and it is globally unique. - - !ruby/object:Api::Type::Array - name: 'credentials' - description: | - The credentials used to authenticate this device. - max_size: 3 - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::Time - name: 'expirationTime' - description: | - The time at which this credential becomes invalid. - default_from_api: true - - !ruby/object:Api::Type::NestedObject - name: 'publicKey' - required: true - description: | - A public key used to verify the signature of JSON Web Tokens (JWTs). - properties: - - !ruby/object:Api::Type::Enum - name: 'format' - required: true - description: | - The format of the key. - values: - - :RSA_PEM - - :RSA_X509_PEM - - :ES256_PEM - - :ES256_X509_PEM - - !ruby/object:Api::Type::String - name: 'key' - required: true - description: | - The key data. 
- - !ruby/object:Api::Type::Time - name: 'lastHeartbeatTime' - output: true - description: | - The last time an MQTT PINGREQ was received. - - !ruby/object:Api::Type::Time - name: 'lastEventTime' - output: true - description: | - The last time a telemetry event was received. - - !ruby/object:Api::Type::Time - name: 'lastStateTime' - output: true - description: | - The last time a state event was received. - - !ruby/object:Api::Type::Time - name: 'lastConfigAckTime' - output: true - description: | - The last time a cloud-to-device config version acknowledgment was received from the device. - - !ruby/object:Api::Type::Time - name: 'lastConfigSendTime' - output: true - description: | - The last time a cloud-to-device config version was sent to the device. - - !ruby/object:Api::Type::Boolean - name: 'blocked' - description: | - If a device is blocked, connections or requests from this device will fail. - - !ruby/object:Api::Type::Time - name: 'lastErrorTime' - output: true - description: | - The time the most recent error occurred, such as a failure to publish to Cloud Pub/Sub. - - !ruby/object:Api::Type::NestedObject - name: 'lastErrorStatus' - output: true - description: | - The error message of the most recent error, such as a failure to publish to Cloud Pub/Sub. - properties: - - !ruby/object:Api::Type::Integer - name: 'number' - description: | - The status code, which should be an enum value of google.rpc.Code. - - !ruby/object:Api::Type::String - name: 'message' - description: | - A developer-facing error message, which should be in English. - - !ruby/object:Api::Type::Array - name: 'details' - description: | - A list of messages that carry the error details. - item_type: Api::Type::KeyValuePairs - - !ruby/object:Api::Type::NestedObject - name: 'config' - output: true - description: | - The most recent device configuration, which is eventually sent from Cloud IoT Core to the device. 
- properties: - - !ruby/object:Api::Type::String - name: 'version' - output: true - description: | - The version of this update. - - !ruby/object:Api::Type::String - name: 'cloudUpdateTime' - output: true - description: | - The time at which this configuration version was updated in Cloud IoT Core. - - !ruby/object:Api::Type::String - name: 'deviceAckTime' - output: true - description: | - The time at which Cloud IoT Core received the acknowledgment from the device, - indicating that the device has received this configuration version. - - !ruby/object:Api::Type::String - name: 'binaryData' - description: | - The device configuration data. - - !ruby/object:Api::Type::NestedObject - name: 'state' - output: true - description: | - The state most recently received from the device. - properties: - - !ruby/object:Api::Type::Time - name: 'updateTime' - description: | - The time at which this state version was updated in Cloud IoT Core. - - !ruby/object:Api::Type::String - name: 'binaryData' - description: | - The device state data. - - !ruby/object:Api::Type::Enum - name: 'logLevel' - allow_empty_object: true - description: | - The logging verbosity for device activity. - values: - - :NONE - - :ERROR - - :INFO - - :DEBUG - - !ruby/object:Api::Type::KeyValuePairs - name: 'metadata' - description: | - The metadata key-value pairs assigned to the device. - - !ruby/object:Api::Type::NestedObject - name: 'gatewayConfig' - description: | - Gateway-related configuration and state. - update_mask_fields: - - 'gateway_config.gateway_auth_method' - properties: - - !ruby/object:Api::Type::Enum - name: 'gatewayType' - default_value: :NON_GATEWAY - immutable: true - description: | - Indicates whether the device is a gateway. - values: - - :GATEWAY - - :NON_GATEWAY - - !ruby/object:Api::Type::Enum - name: 'gatewayAuthMethod' - description: | - Indicates whether the device is a gateway. 
- values: - - :ASSOCIATION_ONLY - - :DEVICE_AUTH_TOKEN_ONLY - - :ASSOCIATION_AND_DEVICE_AUTH_TOKEN - - !ruby/object:Api::Type::String - name: 'lastAccessedGatewayId' - output: true - description: | - The ID of the gateway the device accessed most recently. - - !ruby/object:Api::Type::Time - name: 'lastAccessedGatewayTime' - output: true - description: | - The most recent time at which the device accessed the gateway specified in last_accessed_gateway. diff --git a/mmv1/products/cloudiot/DeviceRegistry.yaml b/mmv1/products/cloudiot/DeviceRegistry.yaml deleted file mode 100644 index d879e925304d..000000000000 --- a/mmv1/products/cloudiot/DeviceRegistry.yaml +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright 2023 Google Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- !ruby/object:Api::Resource -name: 'DeviceRegistry' -base_url: 'projects/{{project}}/locations/{{region}}/registries' -self_link: 'projects/{{project}}/locations/{{region}}/registries/{{name}}' -update_verb: :PATCH -update_mask: true -description: | - A Google Cloud IoT Core device registry. 
-references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Official Documentation': 'https://cloud.google.com/iot/docs/' - api: 'https://cloud.google.com/iot/docs/reference/cloudiot/rest/' -iam_policy: !ruby/object:Api::Resource::IamPolicy - method_name_separator: ':' - fetch_iam_policy_verb: :POST - parent_resource_attribute: 'name' - import_format: - [ - 'projects/{{project}}/locations/{{location}}/registries/{{name}}', - '{{name}}', - ] -legacy_name: 'google_cloudiot_registry' -import_format: ['{{project}}/locations/{{region}}/registries/{{name}}'] -id_format: 'projects/{{project}}/locations/{{region}}/registries/{{name}}' -custom_code: !ruby/object:Provider::Terraform::CustomCode - constants: templates/terraform/constants/cloudiot.go.erb - decoder: templates/terraform/decoders/cloudiot_device_registry.go.erb - encoder: templates/terraform/encoders/cloudiot_device_registry.go.erb - extra_schema_entry: templates/terraform/extra_schema_entry/cloudiot_device_registry.go.erb - pre_update: templates/terraform/pre_update/cloudiot_device_registry.go.erb -deprecation_message: >- - `google_cloudiot_registry` is deprecated in the API. This resource will be removed in the next major release of the provider. -docs: !ruby/object:Provider::Terraform::Docs - optional_properties: | - * `state_notification_config` - A PubSub topic to publish device state updates. - The structure is documented below. - - * `mqtt_config` - Activate or deactivate MQTT. - The structure is documented below. - - * `http_config` - Activate or deactivate HTTP. - The structure is documented below. - - * `credentials` - List of public key certificates to authenticate devices. - The structure is documented below. - - The `state_notification_config` block supports: - - * `pubsub_topic_name` - PubSub topic name to publish device state updates. - - The `mqtt_config` block supports: - - * `mqtt_enabled_state` - The field allows `MQTT_ENABLED` or `MQTT_DISABLED`. 
- - The `http_config` block supports: - - * `http_enabled_state` - The field allows `HTTP_ENABLED` or `HTTP_DISABLED`. - - The `credentials` block supports: - - * `public_key_certificate` - A public key certificate format and data. - - The `public_key_certificate` block supports: - - * `format` - The field allows only `X509_CERTIFICATE_PEM`. - - * `certificate` - The certificate data. -examples: - - !ruby/object:Provider::Terraform::Examples - name: 'cloudiot_device_registry_basic' - primary_resource_id: 'test-registry' - primary_resource_name: "fmt.Sprintf(\"tf-test-cloudiot-registry%s\", - context[\"\ - random_suffix\"])" - vars: - cloudiot_registry_name: 'cloudiot-registry' - test_env_vars: - project: :PROJECT_NAME - region: :REGION - - !ruby/object:Provider::Terraform::Examples - name: 'cloudiot_device_registry_single_event_notification_configs' - primary_resource_id: 'test-registry' - vars: - cloudiot_registry_name: 'cloudiot-registry' - cloudiot_device_telemetry_topic_name: 'default-telemetry' - test_env_vars: - project: :PROJECT_NAME - region: :REGION - - !ruby/object:Provider::Terraform::Examples - name: 'cloudiot_device_registry_full' - primary_resource_id: 'test-registry' - vars: - cloudiot_registry_name: 'cloudiot-registry' - cloudiot_device_status_topic_name: 'default-devicestatus' - cloudiot_device_telemetry_topic_name: 'default-telemetry' - cloudiot_additional_device_telemetry_topic_name: 'additional-telemetry' - cloudiot_subfolder_matches_additional_device_telemetry_topic: 'test/path' - test_env_vars: - project: :PROJECT_NAME - region: :REGION -parameters: - - !ruby/object:Api::Type::String - name: region - immutable: true - url_param_only: true - required: false - description: | - The region in which the created registry should reside. - If it is not provided, the provider region is used. 
- ignore_read: true - default_from_api: true -properties: - - !ruby/object:Api::Type::String - name: 'name' - immutable: true - required: true - description: | - A unique name for the resource, required by device registry. - api_name: 'id' - validation: !ruby/object:Provider::Terraform::Validation - function: 'ValidateCloudIotDeviceRegistryID' - - !ruby/object:Api::Type::Array - name: 'eventNotificationConfigs' - description: | - List of configurations for event notifications, such as PubSub topics - to publish device events to. - max_size: 10 - default_from_api: true - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'subfolderMatches' - description: | - If the subfolder name matches this string exactly, this - configuration will be used. The string must not include the - leading '/' character. If empty, all strings are matched. Empty - value can only be used for the last `event_notification_configs` - item. - validation: !ruby/object:Provider::Terraform::Validation - function: 'validateCloudIotDeviceRegistrySubfolderMatch' - - !ruby/object:Api::Type::String - name: 'pubsubTopicName' - required: true - description: | - PubSub topic name to publish device events. - diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' - - !ruby/object:Api::Type::Enum - name: 'logLevel' - default_value: :NONE - description: | - The default logging verbosity for activity from devices in this - registry. Specifies which events should be written to logs. For - example, if the LogLevel is ERROR, only events that terminate in - errors will be logged. LogLevel is inclusive; enabling INFO logging - will also enable ERROR logging. 
- values: - - :NONE - - :ERROR - - :INFO - - :DEBUG - diff_suppress_func: 'tpgresource.EmptyOrDefaultStringSuppress("NONE")' diff --git a/mmv1/products/cloudrun/DomainMapping.yaml b/mmv1/products/cloudrun/DomainMapping.yaml index 8eb9730a654c..82aa8c386dd3 100644 --- a/mmv1/products/cloudrun/DomainMapping.yaml +++ b/mmv1/products/cloudrun/DomainMapping.yaml @@ -42,6 +42,11 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: 'templates/terraform/encoders/cloud_run_domain_mapping.go.erb' decoder: 'templates/terraform/decoders/cloud_run.go.erb' constants: templates/terraform/constants/cloud_run_domain_mapping.go.erb +custom_diff: [ + 'hasMetadata', +] +state_upgraders: true +schema_version: 1 parameters: - !ruby/object:Api::Type::String name: location @@ -154,18 +159,16 @@ properties: default_value: :AUTOMATIC - !ruby/object:Api::Type::NestedObject name: metadata - required: true + default_from_api: true description: Metadata associated with this DomainMapping. properties: - - !ruby/object:Api::Type::KeyValuePairs - name: labels + - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' description: |- Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels - default_from_api: true - diff_suppress_func: 'DomainMappingLabelDiffSuppress' - !ruby/object:Api::Type::Integer name: generation description: |- @@ -203,8 +206,8 @@ properties: In Cloud Run the namespace must be equal to either the project ID or project number. custom_flatten: templates/terraform/custom_flatten/set_to_project.go.erb - - !ruby/object:Api::Type::KeyValuePairs - name: annotations + - !ruby/object:Api::Type::KeyValueAnnotations + name: 'annotations' description: |- Annotations is a key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
More @@ -213,5 +216,3 @@ properties: **Note**: The Cloud Run API may add additional annotations that were not provided in your config. If terraform plan shows a diff where a server-side annotation is added, you can add it to your config or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. - default_from_api: true - diff_suppress_func: 'cloudrunAnnotationDiffSuppress' diff --git a/mmv1/products/cloudrun/Service.yaml b/mmv1/products/cloudrun/Service.yaml index 1160362ab594..99dd210064f2 100644 --- a/mmv1/products/cloudrun/Service.yaml +++ b/mmv1/products/cloudrun/Service.yaml @@ -152,7 +152,9 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode custom_diff: [ 'revisionNameCustomizeDiff', ] -schema_version: 1 +state_upgraders: true +state_upgrade_base_schema_version: 1 +schema_version: 2 parameters: - !ruby/object:Api::Type::String name: location @@ -240,7 +242,7 @@ properties: default_from_api: true properties: - !ruby/object:Api::Type::KeyValuePairs - name: labels + name: 'labels' description: |- Map of string keys and values that can be used to organize and categorize (scope and select) objects. @@ -279,7 +281,7 @@ properties: default_from_api: true custom_expand: 'templates/terraform/custom_expand/default_to_project.go.erb' - !ruby/object:Api::Type::KeyValuePairs - name: annotations + name: 'annotations' description: |- Annotations is a key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. More @@ -970,14 +972,12 @@ properties: and annotations. default_from_api: true properties: - - !ruby/object:Api::Type::KeyValuePairs - name: labels + - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' description: |- Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. 
- default_from_api: true - diff_suppress_func: 'cloudrunLabelDiffSuppress' - !ruby/object:Api::Type::Integer name: generation description: |- @@ -1012,8 +1012,8 @@ properties: default_from_api: true custom_flatten: templates/terraform/custom_flatten/set_to_project.go.erb custom_expand: 'templates/terraform/custom_expand/default_to_project.go.erb' - - !ruby/object:Api::Type::KeyValuePairs - name: annotations + - !ruby/object:Api::Type::KeyValueAnnotations + name: 'annotations' description: |- Annotations is a key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. More @@ -1036,5 +1036,3 @@ properties: for the Service. For example, `"run.googleapis.com/ingress" = "all"`. - `run.googleapis.com/launch-stage` sets the [launch stage](https://cloud.google.com/run/docs/troubleshooting#launch-stage-validation) when a preview feature is used. For example, `"run.googleapis.com/launch-stage": "BETA"` - default_from_api: true - diff_suppress_func: 'cloudrunAnnotationDiffSuppress' diff --git a/mmv1/products/cloudrunv2/Job.yaml b/mmv1/products/cloudrunv2/Job.yaml index d484d5e717c6..fe8509b65791 100644 --- a/mmv1/products/cloudrunv2/Job.yaml +++ b/mmv1/products/cloudrunv2/Job.yaml @@ -84,6 +84,13 @@ examples: vpc_access_connector_name: 'run-vpc' vpc_compute_subnetwork_name: 'run-subnetwork' compute_network_name: 'run-network' + - !ruby/object:Provider::Terraform::Examples + name: 'cloudrunv2_job_directvpc' + primary_resource_id: 'default' + primary_resource_name: "fmt.Sprintf(\"tf-test-cloudrun-job%s\", context[\"random_suffix\"\ + ])" + vars: + cloud_run_job_name: 'cloudrun-job' - !ruby/object:Provider::Terraform::Examples name: 'cloudrunv2_job_secret' primary_resource_id: 'default' @@ -104,6 +111,7 @@ examples: parameters: - !ruby/object:Api::Type::String name: 'location' + required: true immutable: true url_param_only: true description: The location of the cloud run job @@ -129,7 +137,7 @@ properties: output: true 
description: | A number that monotonically increases every time the user modifies the desired state. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: |- Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, @@ -137,8 +145,8 @@ properties: Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Job. - - !ruby/object:Api::Type::KeyValuePairs - name: "annotations" + - !ruby/object:Api::Type::KeyValueAnnotations + name: 'annotations' description: |- Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. @@ -229,7 +237,7 @@ properties: Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 ExecutionTemplate. - !ruby/object:Api::Type::KeyValuePairs - name: "annotations" + name: 'annotations' description: |- Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. @@ -367,160 +375,6 @@ properties: name: 'workingDir' description: |- Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. - - !ruby/object:Api::Type::NestedObject - name: 'livenessProbe' - description: |- - Periodic probe of container liveness. Container will be restarted if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - This field is not supported in Cloud Run Job currently. - deprecation_message: >- - `liveness_probe` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. - properties: - - !ruby/object:Api::Type::Integer - name: 'initialDelaySeconds' - description: |- - Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - default_value: 0 - - !ruby/object:Api::Type::Integer - name: 'timeoutSeconds' - description: |- - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - default_value: 1 - - !ruby/object:Api::Type::Integer - name: 'periodSeconds' - description: |- - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds - default_value: 10 - - !ruby/object:Api::Type::Integer - name: 'failureThreshold' - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - default_value: 3 - - !ruby/object:Api::Type::NestedObject - name: 'httpGet' - description: |- - HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified. 
- send_empty_value: true - allow_empty_object: - true - # exactly_one_of: - # - template.0.template.0.containers.0.livenessProbe.0.httpGet - # - template.0.template.0.containers.0.livenessProbe.0.tcpSocket - properties: - - !ruby/object:Api::Type::String - name: 'path' - default_value: '/' - description: |- - Path to access on the HTTP server. Defaults to '/'. - - !ruby/object:Api::Type::Array - name: httpHeaders - description: |- - Custom headers to set in the request. HTTP allows repeated headers. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: name - required: true - description: |- - The header field name - - !ruby/object:Api::Type::String - name: value - description: |- - The header field value - default_value: '' - send_empty_value: true - - !ruby/object:Api::Type::NestedObject - name: 'tcpSocket' - description: |- - TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified. - send_empty_value: true - allow_empty_object: - true - # exactly_one_of: - # - template.0.template.0.containers.0.livenessProbe.0.httpGet - # - template.0.template.0.containers.0.livenessProbe.0.tcpSocket - properties: - - !ruby/object:Api::Type::Integer - name: port - description: |- - Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to 8080. - - !ruby/object:Api::Type::NestedObject - name: 'startupProbe' - description: |- - Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - This field is not supported in Cloud Run Job currently. - deprecation_message: >- - `startup_probe` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. 
- default_from_api: true - properties: - - !ruby/object:Api::Type::Integer - name: 'initialDelaySeconds' - description: |- - Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - default_value: 0 - - !ruby/object:Api::Type::Integer - name: 'timeoutSeconds' - description: |- - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - default_value: 1 - - !ruby/object:Api::Type::Integer - name: 'periodSeconds' - description: |- - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds - default_value: 10 - - !ruby/object:Api::Type::Integer - name: 'failureThreshold' - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - default_value: 3 - - !ruby/object:Api::Type::NestedObject - name: 'httpGet' - description: |- - HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified. - send_empty_value: true - allow_empty_object: - true - # exactly_one_of: - # - template.0.template.0.containers.startupProbe.0.httpGet - # - template.0.template.0.containers.startupProbe.0.tcpSocket - properties: - - !ruby/object:Api::Type::String - name: 'path' - default_value: '/' - description: |- - Path to access on the HTTP server. Defaults to '/'. - - !ruby/object:Api::Type::Array - name: 'httpHeaders' - description: |- - Custom headers to set in the request. 
HTTP allows repeated headers. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: name - required: true - description: |- - The header field name - - !ruby/object:Api::Type::String - name: value - description: |- - The header field value - default_value: '' - send_empty_value: true - - !ruby/object:Api::Type::NestedObject - name: 'tcpSocket' - description: |- - TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified. - send_empty_value: true - allow_empty_object: - true - # exactly_one_of: - # - template.0.template.0.containers.startupProbe.0.httpGet - # - template.0.template.0.containers.startupProbe.0.tcpSocket - properties: - - !ruby/object:Api::Type::Integer - name: port - description: |- - Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to 8080. - default_from_api: true - !ruby/object:Api::Type::Array name: 'volumes' description: |- @@ -645,6 +499,32 @@ properties: values: - :ALL_TRAFFIC - :PRIVATE_RANGES_ONLY + default_from_api: true + - !ruby/object:Api::Type::Array + name: 'networkInterfaces' + description: |- + Direct VPC egress settings. Currently only single network interface is supported. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'network' + description: |- + The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both + network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be + looked up from the subnetwork. + default_from_api: true + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: |- + The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. 
If both + network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the + subnetwork with the same name with the network will be used. + default_from_api: true + - !ruby/object:Api::Type::Array + name: 'tags' + description: |- + Network tags applied to this Cloud Run job. + item_type: Api::Type::String - !ruby/object:Api::Type::Integer name: 'maxRetries' description: |- diff --git a/mmv1/products/cloudrunv2/Service.yaml b/mmv1/products/cloudrunv2/Service.yaml index ae586f92dabe..a96911958bae 100644 --- a/mmv1/products/cloudrunv2/Service.yaml +++ b/mmv1/products/cloudrunv2/Service.yaml @@ -87,6 +87,14 @@ examples: vpc_access_connector_name: 'run-vpc' vpc_compute_subnetwork_name: 'run-subnetwork' compute_network_name: 'run-network' + - !ruby/object:Provider::Terraform::Examples + name: 'cloudrunv2_service_directvpc' + primary_resource_id: 'default' + primary_resource_name: "fmt.Sprintf(\"tf-test-cloudrun-srv%s\", + context[\"random_suffix\"\ + ])" + vars: + cloud_run_service_name: 'cloudrun-service' - !ruby/object:Provider::Terraform::Examples name: 'cloudrunv2_service_probes' primary_resource_id: 'default' @@ -115,6 +123,7 @@ examples: parameters: - !ruby/object:Api::Type::String name: 'location' + required: true immutable: true url_param_only: true description: The location of the cloud run service @@ -144,7 +153,7 @@ properties: output: true description: | A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: |- Unstructured key value map that can be used to organize and categorize objects. 
User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, @@ -152,7 +161,7 @@ properties: Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueAnnotations name: 'annotations' description: |- Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. @@ -238,7 +247,7 @@ properties: description: | If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. - !ruby/object:Api::Type::Array - name: 'custom_audiences' + name: 'customAudiences' min_version: beta description: | One or more custom audiences that you want this service to support. Specify each custom audience as the full URL in a string. The custom audiences are encoded in the token and used to authenticate requests. @@ -301,6 +310,32 @@ properties: values: - :ALL_TRAFFIC - :PRIVATE_RANGES_ONLY + default_from_api: true + - !ruby/object:Api::Type::Array + name: 'networkInterfaces' + description: |- + Direct VPC egress settings. Currently only single network interface is supported. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'network' + description: |- + The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both + network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be + looked up from the subnetwork. 
+ default_from_api: true + - !ruby/object:Api::Type::String + name: 'subnetwork' + description: |- + The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both + network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the + subnetwork with the same name with the network will be used. + default_from_api: true + - !ruby/object:Api::Type::Array + name: 'tags' + description: |- + Network tags applied to this Cloud Run service. + item_type: Api::Type::String - !ruby/object:Api::Type::String name: 'timeout' description: |- @@ -496,19 +531,6 @@ properties: The header field value default_value: '' send_empty_value: true - - !ruby/object:Api::Type::NestedObject - name: 'tcpSocket' - description: |- - TCPSocket specifies an action involving a TCP port. This field is not supported in liveness probe currently. - deprecation_message: >- - `tcp_socket` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API. - send_empty_value: true - allow_empty_object: true - properties: - - !ruby/object:Api::Type::Integer - name: port - description: |- - Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to 8080. - !ruby/object:Api::Type::NestedObject name: grpc description: |- @@ -703,6 +725,7 @@ properties: description: |- The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. 
Format: {project}:{location}:{instance} item_type: Api::Type::String + is_set: true - !ruby/object:Api::Type::NestedObject name: 'emptyDir' description: |- diff --git a/mmv1/products/compute/Address.yaml b/mmv1/products/compute/Address.yaml index e013503a7882..e137f2286548 100644 --- a/mmv1/products/compute/Address.yaml +++ b/mmv1/products/compute/Address.yaml @@ -194,13 +194,12 @@ properties: description: 'The URLs of the resources that are using this address.' item_type: Api::Type::String output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Labels to apply to this address. A list of key->value pairs. update_verb: :POST update_url: 'projects/{{project}}/regions/{{region}}/addresses/{{name}}/setLabels' - min_version: beta - !ruby/object:Api::Type::Fingerprint name: 'labelFingerprint' description: | @@ -208,7 +207,6 @@ properties: internally during updates. update_url: 'projects/{{project}}/regions/{{region}}/addresses/{{name}}/setLabels' update_verb: :POST - min_version: beta - !ruby/object:Api::Type::ResourceRef name: 'network' resource: 'Network' diff --git a/mmv1/products/compute/Autoscaler.yaml b/mmv1/products/compute/Autoscaler.yaml index e10212ef636b..f62d4f9a1b99 100644 --- a/mmv1/products/compute/Autoscaler.yaml +++ b/mmv1/products/compute/Autoscaler.yaml @@ -84,6 +84,7 @@ parameters: URL of the zone where the instance group resides. required: false immutable: true + ignore_read: true default_from_api: true custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' properties: @@ -152,15 +153,11 @@ properties: instance may take to initialize. To do this, create an instance and time the startup process. default_value: 60 - - !ruby/object:Api::Type::Enum + - !ruby/object:Api::Type::String name: 'mode' - default_value: :ON + default_value: 'ON' description: | Defines operating mode for this policy. 
- values: - - :OFF - - :ONLY_UP - - :ON - !ruby/object:Api::Type::NestedObject name: 'scaleDownControl' min_version: beta @@ -367,6 +364,7 @@ properties: TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. min_version: beta + default_value: 'resource.type = gce_instance' - !ruby/object:Api::Type::NestedObject name: 'loadBalancingUtilization' description: | diff --git a/mmv1/products/compute/BackendService.yaml b/mmv1/products/compute/BackendService.yaml index de689673f7d8..cc9837062cba 100644 --- a/mmv1/products/compute/BackendService.yaml +++ b/mmv1/products/compute/BackendService.yaml @@ -958,8 +958,8 @@ properties: name: 'outlierDetection' description: | Settings controlling eviction of unhealthy hosts from the load balancing pool. - This field is applicable only when the load_balancing_scheme is set - to INTERNAL_SELF_MANAGED. + Applicable backend service types can be a global backend service with the + loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. properties: - !ruby/object:Api::Type::NestedObject name: 'baseEjectionTime' diff --git a/mmv1/products/compute/Disk.yaml b/mmv1/products/compute/Disk.yaml index 58935e9fa227..1a9893cd2d20 100644 --- a/mmv1/products/compute/Disk.yaml +++ b/mmv1/products/compute/Disk.yaml @@ -290,7 +290,7 @@ properties: name: 'lastDetachTimestamp' description: 'Last detach timestamp in RFC3339 text format.' output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Labels to apply to this disk. A list of key->value pairs. 
diff --git a/mmv1/products/compute/DiskResourcePolicyAttachment.yaml b/mmv1/products/compute/DiskResourcePolicyAttachment.yaml index c2cbb0524deb..0926037d8e30 100644 --- a/mmv1/products/compute/DiskResourcePolicyAttachment.yaml +++ b/mmv1/products/compute/DiskResourcePolicyAttachment.yaml @@ -77,6 +77,7 @@ parameters: description: 'A reference to the zone where the disk resides.' required: false url_param_only: true + ignore_read: true default_from_api: true properties: - !ruby/object:Api::Type::String diff --git a/mmv1/products/compute/ExternalVpnGateway.yaml b/mmv1/products/compute/ExternalVpnGateway.yaml index 7fd92d7952f3..271b0f23daee 100644 --- a/mmv1/products/compute/ExternalVpnGateway.yaml +++ b/mmv1/products/compute/ExternalVpnGateway.yaml @@ -63,7 +63,7 @@ properties: - !ruby/object:Api::Type::String name: 'description' description: 'An optional description of this resource.' - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'Labels for the external VPN gateway resource.' update_verb: :POST diff --git a/mmv1/products/compute/ForwardingRule.yaml b/mmv1/products/compute/ForwardingRule.yaml index 138294268651..d0258cfdf70a 100644 --- a/mmv1/products/compute/ForwardingRule.yaml +++ b/mmv1/products/compute/ForwardingRule.yaml @@ -216,6 +216,10 @@ examples: - "target" custom_code: !ruby/object:Provider::Terraform::CustomCode post_create: templates/terraform/post_create/labels.erb + constants: 'templates/terraform/constants/compute_forwarding_rule.go.erb' +custom_diff: [ + 'forwardingRuleCustomizeDiff', +] parameters: - !ruby/object:Api::Type::ResourceRef name: 'region' @@ -398,56 +402,58 @@ properties: - !ruby/object:Api::Type::String name: 'portRange' description: | - This field can only be used: - - * If `IPProtocol` is one of TCP, UDP, or SCTP. 
- * By backend service-based network load balancers, target pool-based - network load balancers, internal proxy load balancers, external proxy load - balancers, Traffic Director, external protocol forwarding, and Classic VPN. - Some products have restrictions on what ports can be used. See + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `portRange` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: external passthrough + Network Load Balancers, internal and external proxy Network Load + Balancers, internal and external Application Load Balancers, external + protocol forwarding, and Classic VPN. + * Some products have restrictions on what ports can be used. See [port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) for details. - - Only packets addressed to ports in the specified range will be forwarded to - the backends configured with this forwarding rule. - - The `ports` and `port_range` fields are mutually exclusive. - For external forwarding rules, two or more forwarding rules cannot use the - same `[IPAddress, IPProtocol]` pair, and cannot have - overlapping `portRange`s. + same `[IPAddress, IPProtocol]` pair, and cannot have overlapping + `portRange`s. For internal forwarding rules within the same VPC network, two or more - forwarding rules cannot use the same `[IPAddress, IPProtocol]` - pair, and cannot have overlapping `portRange`s. + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair, and + cannot have overlapping `portRange`s. + + @pattern: \d+(?:-\d+)? 
diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' default_from_api: true - !ruby/object:Api::Type::Array name: 'ports' max_size: 5 description: | - This field can only be used: - - * If `IPProtocol` is one of TCP, UDP, or SCTP. - * By internal TCP/UDP load balancers, backend service-based network load - balancers, internal protocol forwarding and when protocol is not L3_DEFAULT. - - - You can specify a list of up to five ports by number, separated by commas. - The ports can be contiguous or discontiguous. Only packets addressed to - these ports will be forwarded to the backends configured with this - forwarding rule. + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `ports` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: internal passthrough + Network Load Balancers, backend service-based external passthrough Network + Load Balancers, and internal protocol forwarding. + * You can specify a list of up to five ports by number, separated by + commas. The ports can be contiguous or discontiguous. For external forwarding rules, two or more forwarding rules cannot use the - same `[IPAddress, IPProtocol]` pair, and cannot share any values - defined in `ports`. + same `[IPAddress, IPProtocol]` pair if they share at least one port + number. For internal forwarding rules within the same VPC network, two or more - forwarding rules cannot use the same `[IPAddress, IPProtocol]` - pair, and cannot share any values defined in `ports`. + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair if + they share at least one port number. - The `ports` and `port_range` fields are mutually exclusive. + @pattern: \d+(?:-\d+)? 
is_set: true custom_expand: 'templates/terraform/custom_expand/set_to_list.erb' item_type: Api::Type::String @@ -503,7 +509,7 @@ properties: send_empty_value: true update_verb: :PATCH update_url: projects/{{project}}/regions/{{region}}/forwardingRules/{{name}} - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Labels to apply to this forwarding rule. A list of key->value pairs. @@ -519,21 +525,21 @@ properties: - !ruby/object:Api::Type::Boolean name: 'allPorts' description: | - This field can only be used: - * If `IPProtocol` is one of TCP, UDP, or SCTP. - * By internal TCP/UDP load balancers, backend service-based network load - balancers, and internal and external protocol forwarding. - - This option should be set to TRUE when the Forwarding Rule - IPProtocol is set to L3_DEFAULT. - - Set this field to true to allow packets addressed to any port or packets + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `allPorts` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, SCTP, or + L3_DEFAULT. + * It's applicable only to the following products: internal passthrough + Network Load Balancers, backend service-based external passthrough Network + Load Balancers, and internal and external protocol forwarding. + * Set this field to true to allow packets addressed to any port or packets lacking destination port information (for example, UDP fragments after the first fragment) to be forwarded to the backends configured with this - forwarding rule. - - The `ports`, `port_range`, and - `allPorts` fields are mutually exclusive. + forwarding rule. The L3_DEFAULT protocol requires `allPorts` be set to + true. 
- !ruby/object:Api::Type::Enum name: 'networkTier' description: | @@ -650,3 +656,9 @@ properties: - :IPV6 immutable: true default_from_api: true +virtual_fields: + - !ruby/object:Api::Type::Boolean + name: recreate_closed_psc + description: + This is used in PSC consumer ForwardingRule to make terraform recreate the ForwardingRule when the status is closed + default_value: false diff --git a/mmv1/products/compute/GlobalAddress.yaml b/mmv1/products/compute/GlobalAddress.yaml index b5cd3bd89ca5..de7ef1a6bd01 100644 --- a/mmv1/products/compute/GlobalAddress.yaml +++ b/mmv1/products/compute/GlobalAddress.yaml @@ -58,6 +58,7 @@ examples: network_name: 'my-network-name' custom_code: !ruby/object:Provider::Terraform::CustomCode post_create: templates/terraform/post_create/labels.erb + pre_create: templates/terraform/pre_create/compute_global_address.go.erb properties: - !ruby/object:Api::Type::String name: 'address' @@ -86,7 +87,7 @@ properties: characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. required: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Labels to apply to this address. A list of key->value pairs. diff --git a/mmv1/products/compute/GlobalForwardingRule.yaml b/mmv1/products/compute/GlobalForwardingRule.yaml index ee6df3585dfd..f3b30c449c44 100644 --- a/mmv1/products/compute/GlobalForwardingRule.yaml +++ b/mmv1/products/compute/GlobalForwardingRule.yaml @@ -296,7 +296,7 @@ properties: values: - :IPV4 - :IPV6 - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Labels to apply to this forwarding rule. A list of key->value pairs. @@ -423,24 +423,26 @@ properties: - !ruby/object:Api::Type::String name: 'portRange' description: | - This field can only be used: - - * If `IPProtocol` is one of TCP, UDP, or SCTP. 
- * By backend service-based network load balancers, target pool-based - network load balancers, internal proxy load balancers, external proxy load - balancers, Traffic Director, external protocol forwarding, and Classic VPN. - Some products have restrictions on what ports can be used. See + The `portRange` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: external passthrough + Network Load Balancers, internal and external proxy Network Load + Balancers, internal and external Application Load Balancers, external + protocol forwarding, and Classic VPN. + * Some products have restrictions on what ports can be used. See [port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) for details. + For external forwarding rules, two or more forwarding rules cannot use the + same `[IPAddress, IPProtocol]` pair, and cannot have overlapping + `portRange`s. + + For internal forwarding rules within the same VPC network, two or more + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair, and + cannot have overlapping `portRange`s. - * TargetHttpProxy: 80, 8080 - * TargetHttpsProxy: 443 - * TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, - 1883, 5222 - * TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, - 1883, 5222 - * TargetVpnGateway: 500, 4500 + @pattern: \d+(?:-\d+)? 
diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' # This is a multi-resource resource reference (TargetHttp(s)Proxy, # TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, diff --git a/mmv1/products/compute/GlobalNetworkEndpoint.yaml b/mmv1/products/compute/GlobalNetworkEndpoint.yaml index 116b70b41d37..e00f9b6f13ce 100644 --- a/mmv1/products/compute/GlobalNetworkEndpoint.yaml +++ b/mmv1/products/compute/GlobalNetworkEndpoint.yaml @@ -93,6 +93,8 @@ properties: Port number of the external endpoint. required: true custom_flatten: templates/terraform/custom_flatten/float64_to_int.go.erb + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.IntAtLeast(1)' - !ruby/object:Api::Type::String name: 'ipAddress' description: | diff --git a/mmv1/products/compute/Image.yaml b/mmv1/products/compute/Image.yaml index 2c428d454187..97894c4910c1 100644 --- a/mmv1/products/compute/Image.yaml +++ b/mmv1/products/compute/Image.yaml @@ -147,6 +147,7 @@ properties: - :SEV_SNP_CAPABLE - :SUSPEND_RESUME_COMPATIBLE - :TDX_CAPABLE + - :SEV_LIVE_MIGRATABLE_V2 - !ruby/object:Api::Type::NestedObject name: 'imageEncryptionKey' description: | @@ -170,7 +171,7 @@ properties: The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: Labels to apply to this Image. update_verb: :POST diff --git a/mmv1/products/compute/Instance.yaml b/mmv1/products/compute/Instance.yaml index 4d8111c04eae..2c8b0c2671fd 100644 --- a/mmv1/products/compute/Instance.yaml +++ b/mmv1/products/compute/Instance.yaml @@ -295,7 +295,7 @@ properties: internally during updates. 
update_url: 'projects/{{project}}/zones/{{zone}}/instances/{{name}}/setLabels' update_verb: :POST - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Labels to apply to this instance. A list of key->value pairs. @@ -497,6 +497,14 @@ properties: should be specified. # networkInterfaces.kind is not necessary for convergence. custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' + - !ruby/object:Api::Type::ResourceRef + name: 'networkAttachment' + resource: 'networkAttachment' + min_version: beta + imports: 'selfLink' + description: | + The URL of the network attachment that this interface should connect to in the following format: + projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. - !ruby/object:Api::Type::NestedObject name: 'scheduling' description: Sets the scheduling options for this instance. diff --git a/mmv1/products/compute/InstanceGroupNamedPort.yaml b/mmv1/products/compute/InstanceGroupNamedPort.yaml index 2cc5e2a69e49..be2ae5f63aa8 100644 --- a/mmv1/products/compute/InstanceGroupNamedPort.yaml +++ b/mmv1/products/compute/InstanceGroupNamedPort.yaml @@ -71,6 +71,11 @@ examples: network_name: 'container-network' subnetwork_name: 'container-subnetwork' gke_cluster_name: 'my-cluster' + deletion_protection: 'true' + test_vars_overrides: + deletion_protection: 'false' + oics_vars_overrides: + deletion_protection: 'false' custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: 'templates/terraform/encoders/normalize_group.go.erb' parameters: diff --git a/mmv1/products/compute/InstanceSettings.yaml b/mmv1/products/compute/InstanceSettings.yaml new file mode 100644 index 000000000000..0f3e43f97567 --- /dev/null +++ b/mmv1/products/compute/InstanceSettings.yaml @@ -0,0 +1,80 @@ +# Copyright 2023 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'InstanceSettings' +kind: 'compute#instanceSettings' +base_url: projects/{{project}}/zones/{{zone}}/instanceSettings +self_link: projects/{{project}}/zones/{{zone}}/instanceSettings +description: | + Represents an Instance Settings resource. Instance settings are centralized configuration parameters that allow users to configure the default values for specific VM parameters that are normally set using GCE instance API methods. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Update Instance Settings': 'https://cloud.google.com/compute/docs/metadata/setting-custom-metadata#set-custom-project-zonal-metadata' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/instanceSettings' +create_verb: :PATCH +create_url: 'projects/{{project}}/zones/{{zone}}/instanceSettings?update_mask=*' +update_verb: :PATCH +update_url: 'projects/{{project}}/zones/{{zone}}/instanceSettings?update_mask=*' +import_format: ['projects/{{project}}/zones/{{zone}}/instanceSettings'] +min_version: beta +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + kind: 'compute#operation' + path: 'name' + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'targetLink' + status: !ruby/object:Api::OpAsync::Status + path: 'status' + complete: 'DONE' + allowed: + - 'PENDING' + - 'RUNNING' + - 'DONE' + error: !ruby/object:Api::OpAsync::Error + path: 'error/errors' + message: 'message' +examples: + - !ruby/object:Provider::Terraform::Examples + min_version: beta + name: 'instance_settings_basic' + primary_resource_id: 'gce_instance_settings' +custom_code: !ruby/object:Provider::Terraform::CustomCode + custom_delete: templates/terraform/custom_delete/clear_instance_settings.go.erb + test_check_destroy: templates/terraform/custom_check_destroy/skip_delete_during_test.go.erb +parameters: + - !ruby/object:Api::Type::ResourceRef + name: 'zone' + resource: 'Zone' + imports: 'name' + description: 'A reference to the zone where the machine resides.' + required: true + custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' +properties: + - !ruby/object:Api::Type::Fingerprint + name: 'fingerprint' + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. 
+ custom_expand: 'templates/terraform/custom_expand/compute_instance_settings_fingerprint.erb' + - !ruby/object:Api::Type::NestedObject + name: 'metadata' + description: | + The metadata key/value pairs assigned to all the instances in the corresponding scope. + properties: + - !ruby/object:Api::Type::KeyValuePairs + name: 'items' + description: | + A metadata key/value items map. The total size of all keys and values must be less than 512KB diff --git a/mmv1/products/compute/NetworkAttachment.yaml b/mmv1/products/compute/NetworkAttachment.yaml index 4b90478d792e..95eb9d46418a 100644 --- a/mmv1/products/compute/NetworkAttachment.yaml +++ b/mmv1/products/compute/NetworkAttachment.yaml @@ -50,18 +50,19 @@ examples: resource_name: 'basic-network-attachment' network_name: 'basic-network' subnetwork_name: 'basic-subnetwork' + accepted_producer_project_name: 'prj-accepted' + rejected_producer_project_name: 'prj-rejected' test_env_vars: org_id: :ORG_ID billing_account: :BILLING_ACCT -# TODO uncomment once this resource is available at google_compute_instance resource -# - !ruby/object:Provider::Terraform::Examples -# name: 'network_attachment_instance_usage' -# primary_resource_id: 'default' -# vars: -# resource_name: 'basic-network-attachment' -# network_name: 'basic-network' -# subnetwork_name: 'basic-subnetwork' -# instance_name: 'basic-instance' + - !ruby/object:Provider::Terraform::Examples + name: 'network_attachment_instance_usage' + primary_resource_id: 'default' + vars: + resource_name: 'basic-network-attachment' + network_name: 'basic-network' + subnetwork_name: 'basic-subnetwork' + instance_name: 'basic-instance' parameters: - !ruby/object:Api::Type::String name: 'name' diff --git a/mmv1/products/compute/NetworkEndpointGroup.yaml b/mmv1/products/compute/NetworkEndpointGroup.yaml index 9e849d2b3f76..f37a3365d013 100644 --- a/mmv1/products/compute/NetworkEndpointGroup.yaml +++ b/mmv1/products/compute/NetworkEndpointGroup.yaml @@ -78,6 +78,7 @@ parameters: Zone 
where the network endpoint group is located. required: false default_from_api: true + ignore_read: true custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' properties: - !ruby/object:Api::Type::String @@ -109,11 +110,15 @@ properties: INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or CONNECTION balancing modes. - Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, and NON_GCP_PRIVATE_IP_PORT. + Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_IP_PORT, INTERNET_FQDN_PORT, SERVERLESS, and PRIVATE_SERVICE_CONNECT. values: - :GCE_VM_IP - :GCE_VM_IP_PORT - :NON_GCP_PRIVATE_IP_PORT + - :INTERNET_IP_PORT + - :INTERNET_FQDN_PORT + - :SERVERLESS + - :PRIVATE_SERVICE_CONNECT default_value: :GCE_VM_IP_PORT - !ruby/object:Api::Type::Integer name: 'size' diff --git a/mmv1/products/compute/NetworkPeeringRoutesConfig.yaml b/mmv1/products/compute/NetworkPeeringRoutesConfig.yaml index b939c63a33c8..bece6c506399 100644 --- a/mmv1/products/compute/NetworkPeeringRoutesConfig.yaml +++ b/mmv1/products/compute/NetworkPeeringRoutesConfig.yaml @@ -74,6 +74,11 @@ examples: network_name: 'container-network' subnetwork_name: 'container-subnetwork' gke_cluster_name: 'private-cluster' + deletion_protection: 'true' + test_vars_overrides: + deletion_protection: 'false' + oics_vars_overrides: + deletion_protection: 'false' custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: 'templates/terraform/encoders/network_peering_routes_config.go.erb' parameters: diff --git a/mmv1/products/compute/NodeGroup.yaml b/mmv1/products/compute/NodeGroup.yaml index 63e3ab1a8f8d..c8ba31a1074a 100644 --- a/mmv1/products/compute/NodeGroup.yaml +++ b/mmv1/products/compute/NodeGroup.yaml @@ -16,10 +16,11 @@ name: 'NodeGroup' kind: 'compute#NodeGroup' base_url: projects/{{project}}/zones/{{zone}}/nodeGroups create_url: projects/{{project}}/zones/{{zone}}/nodeGroups?initialNodeCount=PRE_CREATE_REPLACE_ME +update_verb: 
:PATCH +update_mask: true has_self_link: true description: | Represents a NodeGroup resource to manage a group of sole-tenant nodes. -immutable: true references: !ruby/object:Api::Resource::ReferenceLinks guides: 'Sole-Tenant Nodes': 'https://cloud.google.com/compute/docs/nodes/' @@ -43,12 +44,6 @@ async: !ruby/object:Api::OpAsync error: !ruby/object:Api::OpAsync::Error path: 'error/errors' message: 'message' -docs: !ruby/object:Provider::Terraform::Docs - warning: | - Due to limitations of the API, Terraform cannot update the - number of nodes in a node group and changes to node group size either - through Terraform config or through external changes will cause - Terraform to delete and recreate the node group. examples: - !ruby/object:Provider::Terraform::Examples name: 'node_group_basic' @@ -56,6 +51,13 @@ examples: vars: group_name: 'soletenant-group' template_name: 'soletenant-tmpl' + - !ruby/object:Provider::Terraform::Examples + name: 'node_group_maintenance_interval' + min_version: beta + primary_resource_id: 'nodes' + vars: + group_name: 'soletenant-group' + template_name: 'soletenant-tmpl' - !ruby/object:Provider::Terraform::Examples name: 'node_group_autoscaling_policy' primary_resource_id: 'nodes' @@ -112,21 +114,13 @@ properties: - !ruby/object:Api::Type::Integer name: 'size' description: | - The total number of nodes in the node group. One of `initial_size` or `size` must be specified. - immutable: true - send_empty_value: true - default_from_api: true - exactly_one_of: - - size - - initial_size + The total number of nodes in the node group. + output: true - !ruby/object:Api::Type::Integer name: 'initialSize' description: | - The initial number of nodes in the node group. One of `initial_size` or `size` must be specified. + The initial number of nodes in the node group. One of `initial_size` or `autoscaling_policy` must be configured on resource creation. 
url_param_only: true - exactly_one_of: - - size - - initial_size - !ruby/object:Api::Type::String name: 'maintenancePolicy' description: | @@ -147,6 +141,8 @@ properties: description: | If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. + + One of `initial_size` or `autoscaling_policy` must be configured on resource creation. default_from_api: true properties: - !ruby/object:Api::Type::Enum @@ -207,3 +203,14 @@ properties: required: true description: | The project id/number should be the same as the key of this project config in the project map. + - !ruby/object:Api::Type::Enum + name: 'maintenanceInterval' + min_version: beta + description: | + Specifies the frequency of planned maintenance events. Set to one of the following: + - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. + - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. + values: + - :AS_NEEDED + - :RECURRENT + default_from_api: true diff --git a/mmv1/products/compute/PerInstanceConfig.yaml b/mmv1/products/compute/PerInstanceConfig.yaml index 57c1b8bc3300..2c5d8e494298 100644 --- a/mmv1/products/compute/PerInstanceConfig.yaml +++ b/mmv1/products/compute/PerInstanceConfig.yaml @@ -124,6 +124,7 @@ parameters: url_param_only: true immutable: true ignore_read: true + default_from_api: true - !ruby/object:Api::Type::ResourceRef name: 'instanceGroupManager' resource: 'InstanceGroupManager' @@ -192,7 +193,6 @@ properties: - !ruby/object:Api::Type::Map name: 'internalIp' api_name: internalIPs - min_version: beta key_name: 'interface_name' description: | Preserved internal IPs defined for this instance. 
This map is keyed with the name of the network interface. @@ -220,7 +220,6 @@ properties: custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - !ruby/object:Api::Type::Map name: 'externalIp' - min_version: beta api_name: externalIPs key_name: 'interface_name' description: | diff --git a/mmv1/products/compute/RegionAutoscaler.yaml b/mmv1/products/compute/RegionAutoscaler.yaml index e68955cf3728..931e07443408 100644 --- a/mmv1/products/compute/RegionAutoscaler.yaml +++ b/mmv1/products/compute/RegionAutoscaler.yaml @@ -65,6 +65,7 @@ parameters: required: false immutable: true default_from_api: true + ignore_read: true custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' properties: - !ruby/object:Api::Type::Time @@ -132,15 +133,11 @@ properties: instance may take to initialize. To do this, create an instance and time the startup process. default_value: 60 - - !ruby/object:Api::Type::Enum + - !ruby/object:Api::Type::String name: 'mode' - default_value: :ON + default_value: 'ON' description: | Defines operating mode for this policy. - values: - - :OFF - - :ONLY_UP - - :ON - !ruby/object:Api::Type::NestedObject name: 'scaleDownControl' min_version: beta diff --git a/mmv1/products/compute/RegionDisk.yaml b/mmv1/products/compute/RegionDisk.yaml index 94424537f21b..d421a950923f 100644 --- a/mmv1/products/compute/RegionDisk.yaml +++ b/mmv1/products/compute/RegionDisk.yaml @@ -218,7 +218,7 @@ properties: name: 'lastDetachTimestamp' description: 'Last detach timestamp in RFC3339 text format.' output: true - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | Labels to apply to this disk. A list of key->value pairs. 
diff --git a/mmv1/products/compute/RegionPerInstanceConfig.yaml b/mmv1/products/compute/RegionPerInstanceConfig.yaml index 86d4e45c6f7a..710864fcdaed 100644 --- a/mmv1/products/compute/RegionPerInstanceConfig.yaml +++ b/mmv1/products/compute/RegionPerInstanceConfig.yaml @@ -195,7 +195,6 @@ properties: - !ruby/object:Api::Type::Map name: 'internalIp' api_name: internalIPs - min_version: beta key_name: 'interface_name' description: | Preserved internal IPs defined for this instance. This map is keyed with the name of the network interface. @@ -223,7 +222,6 @@ properties: custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - !ruby/object:Api::Type::Map name: 'externalIp' - min_version: beta api_name: externalIPs key_name: 'interface_name' description: | diff --git a/mmv1/products/compute/RegionSecurityPolicyRule.yaml b/mmv1/products/compute/RegionSecurityPolicyRule.yaml index 8580b3281d90..fecb3ca06c05 100644 --- a/mmv1/products/compute/RegionSecurityPolicyRule.yaml +++ b/mmv1/products/compute/RegionSecurityPolicyRule.yaml @@ -65,7 +65,7 @@ examples: - !ruby/object:Provider::Terraform::Examples name: 'region_security_policy_rule_with_network_match' # it needs to run synchronously because a region can have only one google_compute_network_edge_security_service. - # there is a robust handwritten test which covers this scenario. + # there is a robust handwritten test which covers this scenario. skip_test: true primary_resource_id: 'policy_rule_network_match' min_version: 'beta' diff --git a/mmv1/products/compute/ResourcePolicy.yaml b/mmv1/products/compute/ResourcePolicy.yaml index 2fc5f561273e..8e645187a41e 100644 --- a/mmv1/products/compute/ResourcePolicy.yaml +++ b/mmv1/products/compute/ResourcePolicy.yaml @@ -85,6 +85,7 @@ parameters: description: Region where resource policy resides. 
immutable: true required: false + ignore_read: true default_from_api: true custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' properties: diff --git a/mmv1/products/compute/RouterNat.yaml b/mmv1/products/compute/RouterNat.yaml index cb8831fdd386..9b0d414391a1 100644 --- a/mmv1/products/compute/RouterNat.yaml +++ b/mmv1/products/compute/RouterNat.yaml @@ -91,8 +91,22 @@ examples: address_name1: 'nat-address1' address_name2: 'nat-address2' address_name3: 'nat-address3' + - !ruby/object:Provider::Terraform::Examples + name: 'router_nat_private' + primary_resource_id: 'nat_type' + skip_test: true + min_version: beta + vars: + router_name: 'my-router' + nat_name: 'my-router-nat' + network_name: 'my-network' + subnet_name: 'my-subnetwork' + hub_name: 'my-hub' + spoke_name: 'my-spoke' custom_code: !ruby/object:Provider::Terraform::CustomCode constants: 'templates/terraform/constants/router_nat.go.erb' + pre_create: 'templates/terraform/constants/router_nat_validate_action_active_range.go.erb' + pre_update: 'templates/terraform/constants/router_nat_validate_action_active_range.go.erb' custom_diff: [ 'resourceComputeRouterNatDrainNatIpsCustomDiff', ] @@ -128,7 +142,7 @@ properties: function: 'verify.ValidateRFC1035Name(2, 63)' - !ruby/object:Api::Type::Enum name: 'natIpAllocateOption' - required: true + required: false description: | How external IPs should be allocated for this NAT. Valid values are `AUTO_ONLY` for only allowing NAT IPs allocated by Google Cloud @@ -368,10 +382,57 @@ properties: description: 'A reference to an address associated with this NAT' custom_expand: 'templates/terraform/custom_expand/array_resourceref_with_validation.go.erb' + - !ruby/object:Api::Type::Array + name: 'sourceNatActiveRanges' + min_version: beta + description: | + A list of URLs of the subnetworks used as source ranges for this NAT Rule. + These subnetworks must have purpose set to PRIVATE_NAT. + This field is used for private NAT. 
+ is_set: true + set_hash_func: computeRouterNatRulesSubnetHash + custom_flatten: 'templates/terraform/custom_flatten/nat_rules_subnets_set.erb' + item_type: !ruby/object:Api::Type::ResourceRef + name: 'subnet' + resource: 'Subnetwork' + imports: 'selfLink' + description: + 'A reference to a subnetwork address associated with this NAT' + custom_expand: 'templates/terraform/custom_expand/array_resourceref_with_validation.go.erb' + - !ruby/object:Api::Type::Array + name: 'sourceNatDrainRanges' + min_version: beta + description: | + A list of URLs of subnetworks representing source ranges to be drained. + This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. + This field is used for private NAT. + is_set: true + set_hash_func: computeRouterNatRulesSubnetHash + custom_flatten: 'templates/terraform/custom_flatten/nat_rules_subnets_set.erb' + item_type: !ruby/object:Api::Type::ResourceRef + name: 'subnet' + resource: 'Subnetwork' + imports: 'selfLink' + description: + 'A reference to a subnetwork address associated with this NAT' + custom_expand: 'templates/terraform/custom_expand/array_resourceref_with_validation.go.erb' - !ruby/object:Api::Type::Boolean name: enableEndpointIndependentMapping description: | - Specifies if endpoint independent mapping is enabled. This is enabled by default. For more information - see the [official documentation](https://cloud.google.com/nat/docs/overview#specs-rfcs). - default_value: true + Enable endpoint independent mapping. + For more information see the [official documentation](https://cloud.google.com/nat/docs/overview#specs-rfcs). + default_from_api: true send_empty_value: true + - !ruby/object:Api::Type::Enum + name: 'type' + immutable: true + min_version: beta + description: | + Indicates whether this NAT is used for public or private IP translation. + If unspecified, it defaults to PUBLIC. + If `PUBLIC` NAT used for public IP translation. 
+ If `PRIVATE` NAT used for private IP translation. + values: + - :PUBLIC + - :PRIVATE + default_value: :PUBLIC diff --git a/mmv1/products/compute/ServiceAttachment.yaml b/mmv1/products/compute/ServiceAttachment.yaml index 8531c9ad02d1..f913840817a1 100644 --- a/mmv1/products/compute/ServiceAttachment.yaml +++ b/mmv1/products/compute/ServiceAttachment.yaml @@ -93,6 +93,7 @@ parameters: required: false immutable: true default_from_api: true + ignore_read: true custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' properties: - !ruby/object:Api::Type::String @@ -192,6 +193,7 @@ properties: An array of projects that are allowed to connect to this service attachment. send_empty_value: true + is_set: true item_type: !ruby/object:Api::Type::NestedObject properties: - !ruby/object:Api::Type::String @@ -207,12 +209,10 @@ properties: create. - !ruby/object:Api::Type::Boolean name: reconcileConnections - default_value: true + default_from_api: true send_empty_value: true description: | This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. - - For newly created service attachment, this boolean defaults to true. 
diff --git a/mmv1/products/compute/Snapshot.yaml b/mmv1/products/compute/Snapshot.yaml index 2f04ab0fb2a1..9d7ad7ffe598 100644 --- a/mmv1/products/compute/Snapshot.yaml +++ b/mmv1/products/compute/Snapshot.yaml @@ -236,7 +236,7 @@ properties: imports: 'selfLink' description: 'A reference to a license associated with this snapshot' custom_expand: 'templates/terraform/custom_expand/array_resourceref_with_validation.go.erb' - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: Labels to apply to this Snapshot. update_verb: :POST diff --git a/mmv1/products/compute/Subnetwork.yaml b/mmv1/products/compute/Subnetwork.yaml index 1c48d02c2f40..c15b6e3ca304 100644 --- a/mmv1/products/compute/Subnetwork.yaml +++ b/mmv1/products/compute/Subnetwork.yaml @@ -109,6 +109,20 @@ examples: vars: subnetwork_name: 'internal-ipv6-test-subnetwork' network_name: 'internal-ipv6-test-network' + - !ruby/object:Provider::Terraform::Examples + name: 'subnetwork_purpose_private_nat' + min_version: beta + primary_resource_id: 'subnetwork-purpose-private-nat' + vars: + subnetwork_name: 'subnet-purpose-test-subnetwork' + network_name: 'subnet-purpose-test-network' + - !ruby/object:Provider::Terraform::Examples + name: 'subnetwork_cidr_overlap' + min_version: beta + primary_resource_id: 'subnetwork-cidr-overlap' + vars: + subnetwork_name: 'subnet-cidr-overlap' + network_name: 'net-cidr-overlap' properties: - !ruby/object:Api::Type::Time name: 'creationTimestamp' @@ -165,10 +179,11 @@ properties: name: 'purpose' immutable: true description: | - The purpose of the resource. This field can be either `PRIVATE_RFC_1918`, `REGIONAL_MANAGED_PROXY`, `GLOBAL_MANAGED_PROXY`, or `PRIVATE_SERVICE_CONNECT`. + The purpose of the resource. 
This field can be either `PRIVATE_RFC_1918`, `REGIONAL_MANAGED_PROXY`, `GLOBAL_MANAGED_PROXY`, `PRIVATE_SERVICE_CONNECT` or `PRIVATE_NAT`([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)). A subnet with purpose set to `REGIONAL_MANAGED_PROXY` is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnetwork in a given region with purpose set to `GLOBAL_MANAGED_PROXY` is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. A subnetwork with purpose set to `PRIVATE_SERVICE_CONNECT` reserves the subnet for hosting a Private Service Connect published service. + A subnetwork with purpose set to `PRIVATE_NAT` is used as source range for Private NAT gateways. Note that `REGIONAL_MANAGED_PROXY` is the preferred setting for all regional Envoy load balancers. If unspecified, the purpose defaults to `PRIVATE_RFC_1918`. default_from_api: true @@ -371,7 +386,25 @@ properties: description: | The range of internal IPv6 addresses that are owned by this subnetwork. - !ruby/object:Api::Type::String - name: 'externalIpv6Prefix' + name: 'internalIpv6Prefix' output: true + description: | + The internal IPv6 address range that is assigned to this subnetwork. + - !ruby/object:Api::Type::String + name: 'externalIpv6Prefix' + default_from_api: true description: | The range of external IPv6 addresses that are owned by this subnetwork. + - !ruby/object:Api::Type::Boolean + name: 'allowSubnetCidrRoutesOverlap' + default_from_api: true + update_verb: :PATCH + update_url: projects/{{project}}/regions/{{region}}/subnetworks/{{name}} + fingerprint_name: 'fingerprint' + send_empty_value: true + min_version: beta + description: | + Typically packets destined to IPs within the subnetwork range that do not match + existing resources are dropped and prevented from leaving the VPC. 
+ Setting this field to true will allow these packets to match dynamic routes injected + via BGP even if their destinations match existing subnet ranges. diff --git a/mmv1/products/compute/TargetHttpsProxy.yaml b/mmv1/products/compute/TargetHttpsProxy.yaml index aa255fe48128..b5fb90870efe 100644 --- a/mmv1/products/compute/TargetHttpsProxy.yaml +++ b/mmv1/products/compute/TargetHttpsProxy.yaml @@ -43,6 +43,9 @@ async: !ruby/object:Api::OpAsync error: !ruby/object:Api::OpAsync::Error path: 'error/errors' message: 'message' +custom_code: !ruby/object:Provider::Terraform::CustomCode + encoder: templates/terraform/encoders/compute_target_https_proxy.go.erb + decoder: templates/terraform/decoders/compute_target_https_proxy.go.erb examples: - !ruby/object:Provider::Terraform::Examples name: 'target_https_proxy_basic' @@ -62,6 +65,26 @@ examples: url_map_name: 'url-map' backend_service_name: 'backend-service' http_health_check_name: 'http-health-check' + - !ruby/object:Provider::Terraform::Examples + name: 'target_https_proxy_mtls' + primary_resource_id: 'default' + min_version: 'beta' + vars: + target_https_proxy_name: 'test-mtls-proxy' + ssl_certificate_name: 'my-certificate' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + http_health_check_name: 'http-health-check' + server_tls_policy_name: 'my-tls-policy' + trust_config_name: 'my-trust-config' + - !ruby/object:Provider::Terraform::Examples + name: 'target_https_proxy_certificate_manager_certificate' + primary_resource_id: 'default' + vars: + target_https_proxy_name: 'target-http-proxy' + certificate_manager_certificate_name: 'my-certificate' + url_map_name: 'url-map' + backend_service_name: 'backend-service' properties: - !ruby/object:Api::Type::Time name: 'creationTimestamp' @@ -103,19 +126,36 @@ properties: update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}/setQuicOverride' default_value: :NONE custom_flatten: 'templates/terraform/custom_flatten/default_if_empty.erb' + - 
!ruby/object:Api::Type::Array + name: 'certificateManagerCertificates' + description: | + URLs to certificate manager certificate resources that are used to authenticate connections between users and the load balancer. + Currently, you may specify up to 15 certificates. Certificate manager certificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. + sslCertificates and certificateManagerCertificates fields can not be defined together. + Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificates/{resourceName}` or just the self_link `projects/{project}/locations/{location}/certificates/{resourceName}` + update_verb: :POST + update_url: 'projects/{{project}}/targetHttpsProxies/{{name}}/setSslCertificates' + item_type: Api::Type::String + custom_expand: 'templates/terraform/custom_expand/certificate_manager_certificate_construct_full_url.go.erb' + diff_suppress_func: 'tpgresource.CompareResourceNames' + conflicts: + - ssl_certificates - !ruby/object:Api::Type::Array name: 'sslCertificates' description: | - A list of SslCertificate resource URLs or Certificate Manager certificate URLs that are used to authenticate - connections between users and the load balancer. At least one resource must be specified. + URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. + Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. + sslCertificates and certificateManagerCertificates can not be defined together. 
update_verb: :POST update_url: 'projects/{{project}}/targetHttpsProxies/{{name}}/setSslCertificates' item_type: !ruby/object:Api::Type::ResourceRef name: 'sslCertificate' resource: 'SslCertificate' imports: 'selfLink' - description: 'The SSL certificate URL or Certificate Manager certificate resource URL used by this TargetHttpsProxy' + description: 'The SSL certificate URL used by this TargetHttpsProxy' custom_expand: 'templates/terraform/custom_expand/array_resourceref_with_validation.go.erb' + conflicts: + - certificate_manager_certificates - !ruby/object:Api::Type::String name: 'certificateMap' description: | @@ -161,3 +201,17 @@ properties: external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly. + - !ruby/object:Api::Type::ResourceRef + name: 'serverTlsPolicy' + resource: 'SslPolicy' + imports: 'selfLink' + description: | + A URL referring to a networksecurity.ServerTlsPolicy + resource that describes how the proxy should authenticate inbound + traffic. serverTlsPolicy only applies to a global TargetHttpsProxy + attached to globalForwardingRules with the loadBalancingScheme + set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. + For details which ServerTlsPolicy resources are accepted with + INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED + loadBalancingScheme consult ServerTlsPolicy documentation. + If left blank, communications are not encrypted. 
diff --git a/mmv1/products/compute/VpnTunnel.yaml b/mmv1/products/compute/VpnTunnel.yaml index 3020328720d5..f1724d01ef68 100644 --- a/mmv1/products/compute/VpnTunnel.yaml +++ b/mmv1/products/compute/VpnTunnel.yaml @@ -230,7 +230,7 @@ properties: is_set: true default_from_api: true item_type: Api::Type::String - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: Labels to apply to this VpnTunnel. update_verb: :POST diff --git a/mmv1/products/containerattached/Cluster.yaml b/mmv1/products/containerattached/Cluster.yaml index 3d64fa3485b5..126fb11001e0 100644 --- a/mmv1/products/containerattached/Cluster.yaml +++ b/mmv1/products/containerattached/Cluster.yaml @@ -202,8 +202,8 @@ properties: description: | The Kubernetes version of the cluster. output: true - - !ruby/object:Api::Type::KeyValuePairs - name: annotations + - !ruby/object:Api::Type::KeyValueAnnotations + name: 'annotations' description: | Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and @@ -293,6 +293,16 @@ properties: ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. Up to ten admin users can be provided. + For more info on RBAC, see + https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: adminGroups + description: | + Groups that can perform operations as a cluster admin. A managed + ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole + to the groups. Up to ten admin groups can be provided. + For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles item_type: Api::Type::String @@ -313,3 +323,17 @@ properties: name: enabled description: | Enable Managed Collection. 
+ - !ruby/object:Api::Type::NestedObject + name: binaryAuthorization + description: | + Binary Authorization configuration. + allow_empty_object: true + default_from_api: true + properties: + - !ruby/object:Api::Type::Enum + name: evaluationMode + description: | + Configure Binary Authorization evaluation mode. + values: + - :DISABLED + - :PROJECT_SINGLETON_POLICY_ENFORCE diff --git a/mmv1/products/databasemigrationservice/connectionprofile.yaml b/mmv1/products/databasemigrationservice/ConnectionProfile.yaml similarity index 77% rename from mmv1/products/databasemigrationservice/connectionprofile.yaml rename to mmv1/products/databasemigrationservice/ConnectionProfile.yaml index 3396127ae17b..cf484fe4addc 100644 --- a/mmv1/products/databasemigrationservice/connectionprofile.yaml +++ b/mmv1/products/databasemigrationservice/ConnectionProfile.yaml @@ -78,6 +78,14 @@ examples: sqldb_user: 'my-username' sqldb_pass: 'my-password' profile: 'my-profileid' + - !ruby/object:Provider::Terraform::Examples + name: 'database_migration_service_connection_profile_oracle' + primary_resource_id: 'oracleprofile' + ignore_read_extra: + - 'oracle.0.password' + vars: + profile: 'my-profileid' + skip_test: true - !ruby/object:Provider::Terraform::Examples name: 'database_migration_service_connection_profile_alloydb' primary_resource_id: 'alloydbprofile' @@ -87,8 +95,7 @@ examples: profile: 'my-profileid' global_address_name: 'private-ip-alloc' network_name: 'vpc-network' - test_vars_overrides: - network_name: 'acctest.BootstrapSharedTestNetwork(t, "profile-alloydb")' + skip_test: true parameters: - !ruby/object:Api::Type::String name: 'connectionProfileId' @@ -118,7 +125,7 @@ properties: output: true description: | Output only. The timestamp when the resource was created. A timestamp in RFC3339 UTC 'Zulu' format, accurate to nanoseconds. Example: '2014-10-02T15:01:23.045123456Z'. 
- - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | The resource labels for connection profile to use to annotate any related underlying resources such as Compute Engine VMs. @@ -171,6 +178,7 @@ properties: exactly_one_of: - mysql - postgresql + - oracle - cloudsql - alloydb properties: @@ -252,6 +260,7 @@ properties: exactly_one_of: - mysql - postgresql + - oracle - cloudsql - alloydb properties: @@ -337,8 +346,165 @@ properties: Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. values: - :NETWORK_ARCHITECTURE_OLD_CSQL_PRODUCER - - :NETWORK_ARCHITECTURE_NEW_CSQL_PRODUCER - - !ruby/object:Api::Type::NestedObject + - :NETWORK_ARCHITECTURE_NEW_CSQL_PRODUCER + - !ruby/object:Api::Type::NestedObject + name: 'oracle' + description: | + Specifies connection parameters required specifically for Oracle databases. + exactly_one_of: + - mysql + - postgresql + - oracle + - cloudsql + - alloydb + properties: + - !ruby/object:Api::Type::String + name: 'host' + required: true + description: | + Required. The IP or hostname of the source Oracle database. + - !ruby/object:Api::Type::Integer + name: 'port' + required: true + description: | + Required. The network port of the source Oracle database. + - !ruby/object:Api::Type::String + name: 'username' + required: true + description: | + Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + - !ruby/object:Api::Type::String + name: 'password' + required: true + immutable: true + description: | + Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + This field is not returned on request, and the value is encrypted when stored in Database Migration Service. 
+ sensitive: true + custom_flatten: templates/terraform/custom_flatten/database_migration_service_connection_profile_oracle_password.go.erb + - !ruby/object:Api::Type::Boolean + name: 'passwordSet' + output: true + description: | + Output only. Indicates if this connection profile password is stored. + - !ruby/object:Api::Type::String + name: 'databaseService' + required: true + description: | + Required. Database service for the Oracle connection. + - !ruby/object:Api::Type::NestedObject + name: 'ssl' + description: | + SSL configuration for the destination to connect to the source database. + properties: + - !ruby/object:Api::Type::Enum + name: 'type' + output: true + description: | + The current SSL type of the connection, i.e. whether only a server + certificate is used (SERVER_ONLY) or both server and client certificates (SERVER_CLIENT). + values: + - :SERVER_ONLY + - :SERVER_CLIENT + - !ruby/object:Api::Type::String + name: 'clientKey' + immutable: true + required_with: + - client_certificate + description: | + Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. + If this field is used then the 'clientCertificate' field is mandatory. + sensitive: true + custom_flatten: templates/terraform/custom_flatten/database_migration_service_connection_profile_oracle_ssl_client_key.go.erb + - !ruby/object:Api::Type::String + name: 'clientCertificate' + immutable: true + required_with: + - client_key + description: | + Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server. + If this field is used then the 'clientKey' field is mandatory. + sensitive: true + custom_flatten: templates/terraform/custom_flatten/database_migration_service_connection_profile_oracle_ssl_client_certificate.go.erb + - !ruby/object:Api::Type::String + name: 'caCertificate' + immutable: true + required: true + sensitive: true + custom_flatten: templates/terraform/custom_flatten/database_migration_service_connection_profile_oracle_ssl_ca_certificate.go.erb + description: | + Required. 
Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. + The replica will use this certificate to verify it's connecting to the right host. + - !ruby/object:Api::Type::NestedObject + name: 'staticServiceIpConnectivity' + send_empty_value: true + allow_empty_object: true + description: | + This object has no nested fields. + + Static IP address connectivity configured on service project. + properties: [] + exactly_one_of: + - static_service_ip_connectivity + - forward_ssh_connectivity + - private_connectivity + - !ruby/object:Api::Type::NestedObject + name: 'forwardSshConnectivity' + description: | + Forward SSH tunnel connectivity used by the destination to connect to the source database. + exactly_one_of: + - static_service_ip_connectivity + - forward_ssh_connectivity + - private_connectivity + properties: + - !ruby/object:Api::Type::String + name: 'hostname' + required: true + description: | + Required. Hostname for the SSH tunnel. + - !ruby/object:Api::Type::String + name: 'username' + required: true + description: | + Required. Username for the SSH tunnel. + - !ruby/object:Api::Type::Integer + name: 'port' + required: true + description: | + Port for the SSH tunnel, default value is 22. + - !ruby/object:Api::Type::String + name: 'password' + immutable: true + sensitive: true + description: | + Input only. SSH password. Only one of `password` and `private_key` can be configured. + exactly_one_of: + - forward_ssh_connectivity.0.password + - forward_ssh_connectivity.0.private_key + custom_flatten: templates/terraform/custom_flatten/database_migration_service_connection_profile_oracle_forward_ssh_password.go.erb + - !ruby/object:Api::Type::String + name: 'privateKey' + immutable: true + sensitive: true + description: | + Input only. SSH private key. Only one of `password` and `private_key` can be configured. 
+ exactly_one_of: + - oracle.0.forward_ssh_connectivity.0.password + - oracle.0.forward_ssh_connectivity.0.private_key + custom_flatten: templates/terraform/custom_flatten/database_migration_service_connection_profile_oracle_forward_ssh_private_key.go.erb + - !ruby/object:Api::Type::NestedObject + name: 'privateConnectivity' + description: | + Configuration for using a private network to communicate with the source database + exactly_one_of: + - oracle.0.static_service_ip_connectivity + - oracle.0.forward_ssh_connectivity + - oracle.0.private_connectivity + properties: + - !ruby/object:Api::Type::String + name: 'privateConnection' + required: true + description: | + Required. The resource name (URI) of the private connection. - !ruby/object:Api::Type::NestedObject name: 'cloudsql' description: | @@ -346,6 +512,7 @@ properties: exactly_one_of: - mysql - postgresql + - oracle - cloudsql - alloydb properties: @@ -503,6 +670,7 @@ properties: exactly_one_of: - mysql - postgresql + - oracle - cloudsql - alloydb properties: diff --git a/mmv1/products/databasemigrationservice/PrivateConnection.yaml b/mmv1/products/databasemigrationservice/PrivateConnection.yaml new file mode 100644 index 000000000000..fc0564b32d6d --- /dev/null +++ b/mmv1/products/databasemigrationservice/PrivateConnection.yaml @@ -0,0 +1,112 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Resource +name: 'PrivateConnection' +base_url: 'projects/{{project}}/locations/{{location}}/privateConnections' +create_url: 'projects/{{project}}/locations/{{location}}/privateConnections?privateConnectionId={{private_connection_id}}' +self_link: 'projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}' +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': 'https://cloud.google.com/database-migration/docs/oracle-to-postgresql/create-private-connectivity-configuration' + api: 'https://cloud.google.com/database-migration/docs/reference/rest/v1/projects.locations.privateConnections' +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{{op_id}}' + wait_ms: 1000 +timeouts: !ruby/object:Api::Timeouts + insert_minutes: 60 + delete_minutes: 60 +autogen_async: true +description: | + The PrivateConnection resource is used to establish private connectivity between Database Migration Service and a customer's network. +immutable: true +id_format: projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}} +import_format: + [ + 'projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}', + ] +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'database_migration_service_private_connection' + primary_resource_id: 'default' + vars: + private_connection_id: 'my-connection' + network_name: 'my-network' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedTestNetwork(t, "dbms-privateconnection")' +parameters: + - !ruby/object:Api::Type::String + name: privateConnectionId + description: | + The private connectivity identifier. + required: true + immutable: true + url_param_only: true + - !ruby/object:Api::Type::String + name: 'location' + description: | + The name of the location this private connection is located in. 
+ required: true + immutable: true + url_param_only: true +properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + description: The resource's name. + - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' + description: Labels. + - !ruby/object:Api::Type::String + name: 'displayName' + description: Display name. + default_from_api: true + - !ruby/object:Api::Type::String + name: 'state' + description: | + State of the PrivateConnection. + output: true + - !ruby/object:Api::Type::NestedObject + name: 'error' + output: true + description: | + The PrivateConnection error in case of failure. + properties: + - !ruby/object:Api::Type::String + name: 'message' + description: | + A message containing more information about the error that occurred. + - !ruby/object:Api::Type::KeyValuePairs + name: 'details' + description: | + A list of messages that carry the error details. + - !ruby/object:Api::Type::NestedObject + name: 'vpcPeeringConfig' + required: true + description: | + The VPC Peering configuration is used to create VPC peering + between databasemigrationservice and the consumer's VPC. + properties: + - !ruby/object:Api::Type::String + name: 'vpcName' + required: true + description: | + Fully qualified name of the VPC that Database Migration Service will peer to. + Format: projects/{project}/global/{networks}/{name} + - !ruby/object:Api::Type::String + name: 'subnet' + required: true + description: | + A free subnet for peering. 
(CIDR of /29) diff --git a/mmv1/products/dataform/Repository.yaml b/mmv1/products/dataform/Repository.yaml index 10d3596fd1d1..27de65183388 100644 --- a/mmv1/products/dataform/Repository.yaml +++ b/mmv1/products/dataform/Repository.yaml @@ -36,6 +36,14 @@ examples: git_repository_name: 'my/repository' dataform_repository_name: 'dataform_repository' data: secret-data + - !ruby/object:Provider::Terraform::Examples + name: 'dataform_repository_ssh' + primary_resource_id: dataform_respository + min_version: beta + vars: + git_repository_name: 'my/repository' + dataform_repository_name: 'dataform_repository' + data: secret-data parameters: - !ruby/object:Api::Type::String name: 'region' @@ -64,11 +72,28 @@ properties: description: The Git remote's default branch name. - !ruby/object:Api::Type::String name: 'authenticationTokenSecretVersion' - required: true + exactly_one_of: + - gitRemoteSettings.0.authenticationTokenSecretVersion + - gitRemoteSettings.0.sshAuthenticationConfig description: The name of the Secret Manager secret version to use as an - authentication token for Git operations. Must be in the format + authentication token for Git operations. This secret is for assigning with HTTPS only(for SSH use `ssh_authentication_config`). Must be in the format projects/*/secrets/*/versions/*. + - !ruby/object:Api::Type::NestedObject + name: 'sshAuthenticationConfig' + exactly_one_of: + - gitRemoteSettings.0.authenticationTokenSecretVersion + - gitRemoteSettings.0.sshAuthenticationConfig + description: Authentication fields for remote uris using SSH protocol. + properties: + - !ruby/object:Api::Type::String + name: userPrivateKeySecretVersion + required: true + description: The name of the Secret Manager secret version to use as a ssh private key for Git operations. Must be in the format projects/*/secrets/*/versions/*. 
+ - !ruby/object:Api::Type::String + name: hostPublicKey + required: true + description: Content of a public SSH key to verify an identity of a remote Git host. - !ruby/object:Api::Type::String name: 'tokenStatus' output: true @@ -76,14 +101,17 @@ properties: Indicates the status of the Git access token. https://cloud.google.com/dataform/reference/rest/v1beta1/projects.locations.repositories#TokenStatus - !ruby/object:Api::Type::NestedObject name: 'workspaceCompilationOverrides' - description: Optional. If set, fields of workspaceCompilationOverrides override the default compilation settings that are specified in dataform.json when creating workspace-scoped compilation results. + description: If set, fields of workspaceCompilationOverrides override the default compilation settings that are specified in dataform.json when creating workspace-scoped compilation results. properties: - !ruby/object:Api::Type::String name: defaultDatabase - description: Optional. The default database (Google Cloud project ID). + description: The default database (Google Cloud project ID). - !ruby/object:Api::Type::String name: 'schemaSuffix' - description: Optional. The suffix that should be appended to all schema (BigQuery dataset ID) names. + description: The suffix that should be appended to all schema (BigQuery dataset ID) names. - !ruby/object:Api::Type::String name: 'tablePrefix' - description: Optional. The prefix that should be prepended to all table names. + description: The prefix that should be prepended to all table names. + - !ruby/object:Api::Type::String + name: 'serviceAccount' + description: The service account to run workflow invocations under. 
diff --git a/mmv1/products/dataform/WorkflowConfig.yaml b/mmv1/products/dataform/WorkflowConfig.yaml index 36dcdefe0b54..c45d563d7744 100644 --- a/mmv1/products/dataform/WorkflowConfig.yaml +++ b/mmv1/products/dataform/WorkflowConfig.yaml @@ -35,6 +35,7 @@ examples: vars: workflow_name: 'my_workflow' release_name: 'my_release' + service_account_name: 'dataform-sa' git_repository_name: 'my/repository' dataform_repository_name: 'dataform_repository' data: secret-data diff --git a/mmv1/products/datafusion/Instance.yaml b/mmv1/products/datafusion/Instance.yaml index 17f0333f65d8..ccd0efd1c762 100644 --- a/mmv1/products/datafusion/Instance.yaml +++ b/mmv1/products/datafusion/Instance.yaml @@ -141,7 +141,7 @@ properties: name: 'enableRbac' description: | Option to enable granular role-based access control. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | The resource labels for instance to use to annotate any related underlying resources, diff --git a/mmv1/products/dataplex/Datascan.yaml b/mmv1/products/dataplex/Datascan.yaml index ee861ea31b5c..c3900509e163 100644 --- a/mmv1/products/dataplex/Datascan.yaml +++ b/mmv1/products/dataplex/Datascan.yaml @@ -126,7 +126,7 @@ properties: name: 'displayName' description: | User friendly display name. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | User-defined labels for the scan. A list of key->value pairs. @@ -489,387 +489,3 @@ properties: Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'. item_type: Api::Type::String - - !ruby/object:Api::Type::NestedObject - name: 'dataQualityResult' - output: true - deprecation_message: "`data_quality_result` is deprecated and will be removed in a future major release." 
- description: | - The result of the data quality scan. - properties: - - !ruby/object:Api::Type::Boolean - name: 'passed' - output: true - description: | - Overall data quality result -- true if all rules passed. - - !ruby/object:Api::Type::Array - name: 'dimensions' - description: | - A list of results at the dimension level. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::Boolean - name: 'passed' - description: | - Whether the dimension passed or failed. - - !ruby/object:Api::Type::Array - name: 'rules' - output: true - description: | - A list of all the rules in a job, and their results. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::NestedObject - name: 'rule' - output: true - description: | - The rule specified in the DataQualitySpec, as is. - properties: - - !ruby/object:Api::Type::String - name: 'column' - description: | - The unnested column which this rule is evaluated against. - - !ruby/object:Api::Type::Boolean - name: 'ignoreNull' - description: | - Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules. - - !ruby/object:Api::Type::String - name: 'dimension' - description: | - The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"] - - !ruby/object:Api::Type::Integer - name: 'threshold' - description: | - The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0). - - !ruby/object:Api::Type::NestedObject - name: 'rangeExpectation' - output: true - description: | - ColumnMap rule which evaluates whether each column value lies between a specified range. 
- properties: - - !ruby/object:Api::Type::String - name: 'minValue' - description: | - The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided. - - !ruby/object:Api::Type::String - name: maxValue - description: | - The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided. - - !ruby/object:Api::Type::Boolean - name: 'strictMinEnabled' - default_value: false - description: | - Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. - Only relevant if a minValue has been defined. Default = false. - - !ruby/object:Api::Type::Boolean - name: 'strictMaxEnabled' - default_value: false - description: | - Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. - Only relevant if a maxValue has been defined. Default = false. - - !ruby/object:Api::Type::NestedObject - name: 'nonNullExpectation' - output: true - allow_empty_object: true - description: | - ColumnMap rule which evaluates whether each column value is null. - properties: [] - - !ruby/object:Api::Type::NestedObject - name: 'setExpectation' - output: true - description: | - ColumnMap rule which evaluates whether each column value is contained by a specified set. - properties: - - !ruby/object:Api::Type::Array - name: 'values' - description: | - Expected values for the column value. - item_type: Api::Type::String - - !ruby/object:Api::Type::NestedObject - name: 'regexExpectation' - output: true - description: | - ColumnMap rule which evaluates whether each column value matches a specified regex. - properties: - - !ruby/object:Api::Type::String - name: 'regex' - description: | - A regular expression the column value is expected to match. 
- - !ruby/object:Api::Type::NestedObject - name: 'uniquenessExpectation' - output: true - allow_empty_object: true - description: | - ColumnAggregate rule which evaluates whether the column has duplicates. - properties: [] - - !ruby/object:Api::Type::NestedObject - name: 'statisticRangeExpectation' - output: true - description: | - ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. - properties: - - !ruby/object:Api::Type::Enum - name: 'statistic' - description: | - column statistics. - values: - - :STATISTIC_UNDEFINED - - :MEAN - - :MIN - - :MAX - - !ruby/object:Api::Type::String - name: 'minValue' - description: | - The minimum column statistic value allowed for a row to pass this validation. - At least one of minValue and maxValue need to be provided. - - !ruby/object:Api::Type::String - name: 'maxValue' - description: | - The maximum column statistic value allowed for a row to pass this validation. - At least one of minValue and maxValue need to be provided. - - !ruby/object:Api::Type::Boolean - name: 'strictMinEnabled' - description: | - Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. - Only relevant if a minValue has been defined. Default = false. - - !ruby/object:Api::Type::Boolean - name: 'strictMaxEnabled' - description: | - Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. - Only relevant if a maxValue has been defined. Default = false. - - !ruby/object:Api::Type::NestedObject - name: 'rowConditionExpectation' - output: true - description: | - Table rule which evaluates whether each row passes the specified condition. - properties: - - !ruby/object:Api::Type::String - name: 'sqlExpression' - description: | - The SQL expression. 
- - !ruby/object:Api::Type::NestedObject - name: 'tableConditionExpectation' - output: true - description: | - Table rule which evaluates whether the provided expression is true. - properties: - - !ruby/object:Api::Type::String - name: 'sqlExpression' - description: | - The SQL expression. - - !ruby/object:Api::Type::Boolean - name: 'passed' - output: true - description: | - Whether the rule passed or failed. - - !ruby/object:Api::Type::String - name: 'evaluatedCount' - output: true - description: | - The number of rows a rule was evaluated against. This field is only valid for ColumnMap type rules. - Evaluated count can be configured to either - 1. include all rows (default) - with null rows automatically failing rule evaluation, or - 2. exclude null rows from the evaluatedCount, by setting ignore_nulls = true. - - !ruby/object:Api::Type::String - name: 'passedCount' - output: true - description: | - The number of rows which passed a rule evaluation. This field is only valid for ColumnMap type rules. - - !ruby/object:Api::Type::String - name: 'nullCount' - output: true - description: | - The number of rows with null values in the specified column. - - !ruby/object:Api::Type::Integer - name: 'passRatio' - output: true - description: | - The ratio of passedCount / evaluatedCount. This field is only valid for ColumnMap type rules. - - !ruby/object:Api::Type::String - name: 'failingRowsQuery' - output: true - description: | - The query to find rows that did not pass this rule. Only applies to ColumnMap and RowCondition rules. - - !ruby/object:Api::Type::String - name: 'rowCount' - output: true - description: | - The count of rows processed. - - !ruby/object:Api::Type::NestedObject - name: 'scannedData' - output: true - description: | - The data scanned for this result. 
- properties: - - !ruby/object:Api::Type::NestedObject - name: 'incrementalField' - description: | - The range denoted by values of an incremental field - properties: - - !ruby/object:Api::Type::String - name: 'field' - description: | - The field that contains values which monotonically increases over time (e.g. a timestamp column). - - !ruby/object:Api::Type::String - name: 'start' - description: | - Value that marks the start of the range. - - !ruby/object:Api::Type::String - name: 'end' - description: Value that marks the end of the range. - - !ruby/object:Api::Type::NestedObject - name: 'dataProfileResult' - output: true - deprecation_message: "`data_profile_result` is deprecated and will be removed in a future major release." - custom_flatten: templates/terraform/custom_flatten/dataplex_datascan_ignore_profile_result.go.erb - description: | - The result of the data profile scan. - properties: - - !ruby/object:Api::Type::String - name: 'rowCount' - description: | - The count of rows scanned. - - !ruby/object:Api::Type::NestedObject - name: 'profile' - output: true - description: | - The profile information per field. - properties: - - !ruby/object:Api::Type::Array - name: 'fields' - description: | - List of fields with structural and profile information for each field. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'name' - description: | - The name of the field. - - !ruby/object:Api::Type::String - name: 'type' - description: | - The field data type. - - !ruby/object:Api::Type::String - name: 'mode' - description: | - The mode of the field. Possible values include: - 1. REQUIRED, if it is a required field. - 2. NULLABLE, if it is an optional field. - 3. REPEATED, if it is a repeated field. - - !ruby/object:Api::Type::NestedObject - name: 'profile' - description: | - Profile information for the corresponding field. 
- properties: - - !ruby/object:Api::Type::Integer - name: 'nullRatio' - output: true - description: | - Ratio of rows with null value against total scanned rows. - - !ruby/object:Api::Type::Integer - name: 'distinctRatio' - description: | - Ratio of rows with distinct values against total scanned rows. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode. - - !ruby/object:Api::Type::NestedObject - name: 'topNValues' - description: | - The list of top N non-null values and number of times they occur in the scanned data. N is 10 or equal to the number of distinct values in the field, whichever is smaller. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode. - properties: - - !ruby/object:Api::Type::String - name: 'value' - description: | - String value of a top N non-null value. - - !ruby/object:Api::Type::String - name: 'count' - description: | - Count of the corresponding value in the scanned data. - - !ruby/object:Api::Type::NestedObject - name: 'stringProfile' - output: true - description: | - String type field information. - properties: - - !ruby/object:Api::Type::String - name: 'minLength' - description: | - Minimum length of non-null values in the scanned data. - - !ruby/object:Api::Type::String - name: 'maxLength' - description: | - Maximum length of non-null values in the scanned data. - - !ruby/object:Api::Type::Integer - name: 'averageLength' - description: | - Average length of non-null values in the scanned data. - - !ruby/object:Api::Type::NestedObject - name: 'integerProfile' - output: true - description: | - Integer type field information. - properties: - - !ruby/object:Api::Type::Integer - name: 'average' - description: | - Average of non-null values in the scanned data. NaN, if the field has a NaN. - - !ruby/object:Api::Type::Integer - name: 'standardDeviation' - description: | - Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN. 
- - !ruby/object:Api::Type::String - name: 'min' - description: | - Minimum of non-null values in the scanned data. NaN, if the field has a NaN. - - !ruby/object:Api::Type::String - name: 'quartiles' - description: | - A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3. - - !ruby/object:Api::Type::String - name: 'max' - description: | - Maximum of non-null values in the scanned data. NaN, if the field has a NaN. - - !ruby/object:Api::Type::NestedObject - name: 'doubleProfile' - output: true - description: | - Double type field information. - properties: - - !ruby/object:Api::Type::Integer - name: 'average' - description: | - Average of non-null values in the scanned data. NaN, if the field has a NaN. - - !ruby/object:Api::Type::Integer - name: 'standardDeviation' - description: | - Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN. - - !ruby/object:Api::Type::String - name: 'min' - description: | - Minimum of non-null values in the scanned data. NaN, if the field has a NaN. - - !ruby/object:Api::Type::String - name: 'quartiles' - description: | - A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. 
It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3. - - !ruby/object:Api::Type::String - name: 'max' - description: | - Maximum of non-null values in the scanned data. NaN, if the field has a NaN. - - !ruby/object:Api::Type::NestedObject - name: 'scannedData' - output: true - description: The data scanned for this result. - properties: - - !ruby/object:Api::Type::NestedObject - name: 'incrementalField' - description: | - The range denoted by values of an incremental field - properties: - - !ruby/object:Api::Type::String - name: 'field' - description: | - The field that contains values which monotonically increases over time (e.g. a timestamp column). - - !ruby/object:Api::Type::String - name: 'start' - description: | - Value that marks the start of the range. - - !ruby/object:Api::Type::String - name: 'end' - description: Value that marks the end of the range. diff --git a/mmv1/products/dataplex/Task.yaml b/mmv1/products/dataplex/Task.yaml index 3a4095794264..66ed12c16dbb 100644 --- a/mmv1/products/dataplex/Task.yaml +++ b/mmv1/products/dataplex/Task.yaml @@ -116,7 +116,7 @@ properties: - :CREATING - :DELETING - :ACTION_REQUIRED - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | User-defined labels for the task. 
diff --git a/mmv1/products/datastream/ConnectionProfile.yaml b/mmv1/products/datastream/ConnectionProfile.yaml index 5e5258e765b6..3a8a4d94879f 100644 --- a/mmv1/products/datastream/ConnectionProfile.yaml +++ b/mmv1/products/datastream/ConnectionProfile.yaml @@ -81,7 +81,7 @@ properties: name: 'name' output: true description: The resource's name. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: Labels. - !ruby/object:Api::Type::String diff --git a/mmv1/products/datastream/PrivateConnection.yaml b/mmv1/products/datastream/PrivateConnection.yaml index dcd9a6da8d71..c2a10d6c8bce 100644 --- a/mmv1/products/datastream/PrivateConnection.yaml +++ b/mmv1/products/datastream/PrivateConnection.yaml @@ -59,7 +59,7 @@ properties: name: 'name' output: true description: The resource's name. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: Labels. - !ruby/object:Api::Type::String diff --git a/mmv1/products/datastream/Stream.yaml b/mmv1/products/datastream/Stream.yaml index 039b51a75457..a88ed1eeb73c 100644 --- a/mmv1/products/datastream/Stream.yaml +++ b/mmv1/products/datastream/Stream.yaml @@ -157,7 +157,7 @@ properties: name: 'name' output: true description: The stream's name. - - !ruby/object:Api::Type::KeyValuePairs + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: Labels. 
- !ruby/object:Api::Type::String diff --git a/mmv1/products/dialogflowcx/Agent.yaml b/mmv1/products/dialogflowcx/Agent.yaml index 1d9580af5d6e..086400417dda 100644 --- a/mmv1/products/dialogflowcx/Agent.yaml +++ b/mmv1/products/dialogflowcx/Agent.yaml @@ -31,6 +31,9 @@ examples: primary_resource_id: 'full_agent' vars: agent_name: 'dialogflowcx-agent' + bucket_name: 'dialogflowcx-bucket' + ignore_read_extra: + - git_integration_settings.0.github_settings.0.access_token id_format: 'projects/{{project}}/locations/{{location}}/agents/{{name}}' import_format: ['projects/{{project}}/locations/{{location}}/agents/{{name}}'] skip_sweeper: true @@ -111,3 +114,99 @@ properties: name: 'enableSpellCorrection' description: | Indicates if automatic spell correction is enabled in detect intent requests. + - !ruby/object:Api::Type::NestedObject + name: 'advancedSettings' + description: | + Hierarchical advanced settings for this agent. The settings exposed at the lower level overrides the settings exposed at the higher level. + Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. + default_from_api: true + properties: + - !ruby/object:Api::Type::NestedObject + name: 'audioExportGcsDestination' + description: | + If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: + * Agent level + * Flow level + properties: + - !ruby/object:Api::Type::String + name: 'uri' + description: | + The Google Cloud Storage URI for the exported objects. Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation. + Format: gs://bucket/object-name-or-prefix + - !ruby/object:Api::Type::NestedObject + name: 'dtmfSettings' + description: | + Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. 
Exposed at the following levels: + * Agent level + * Flow level + * Page level + * Parameter level + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). + - !ruby/object:Api::Type::Integer + name: 'maxDigits' + description: | + Max length of DTMF digits. + - !ruby/object:Api::Type::String + name: 'finishDigit' + description: | + The digit that terminates a DTMF digit sequence. + - !ruby/object:Api::Type::NestedObject + name: 'gitIntegrationSettings' + description: | + Git integration settings for this agent. + allow_empty_object: true + properties: + - !ruby/object:Api::Type::NestedObject + name: 'githubSettings' + description: | + Settings of integration with GitHub. + properties: + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The unique repository display name for the GitHub repository. + - !ruby/object:Api::Type::String + name: 'repositoryUri' + description: | + The GitHub repository URI related to the agent. + - !ruby/object:Api::Type::String + name: 'trackingBranch' + description: | + The branch of the GitHub repository tracked for this agent. + - !ruby/object:Api::Type::String + name: 'accessToken' + description: | + The access token used to authenticate the access to the GitHub repository. + sensitive: true + ignore_read: true + custom_flatten: 'templates/terraform/custom_flatten/dialogflowcx_agent_git_integration_settings_github_settings_access_token.go.erb' + - !ruby/object:Api::Type::Array + name: 'branches' + description: | + A list of branches configured to be used from Dialogflow. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'textToSpeechSettings' + description: | + Settings related to speech synthesizing. + allow_empty_object: true + properties: + # This is a map of language -> some settings. List of languages is large and constantly expanding so we use a string instead of a NestedObject with 100 properties. + - !ruby/object:Api::Type::String + name: 'synthesizeSpeechConfigs' + description: | + Configuration of how speech should be synthesized, mapping from [language](https://cloud.google.com/dialogflow/cx/docs/reference/language) to [SynthesizeSpeechConfig](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents#synthesizespeechconfig). + These settings affect: + * The phone gateway synthesize configuration set via Agent.text_to_speech_settings. + * How speech is synthesized when invoking session APIs. `Agent.text_to_speech_settings` only applies if `OutputAudioConfig.synthesize_speech_config` is not specified. 
+ custom_expand: 'templates/terraform/custom_expand/json_schema.erb' + custom_flatten: 'templates/terraform/custom_flatten/json_schema.erb' + state_func: + 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); + return s }' + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.StringIsJSON' diff --git a/mmv1/products/dialogflowcx/Environment.yaml b/mmv1/products/dialogflowcx/Environment.yaml index fa0b087649ac..b8ad55a4b8ae 100644 --- a/mmv1/products/dialogflowcx/Environment.yaml +++ b/mmv1/products/dialogflowcx/Environment.yaml @@ -25,6 +25,7 @@ references: !ruby/object:Api::Resource::ReferenceLinks 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.environments' async: !ruby/object:Api::OpAsync + actions: ['create', 'update'] operation: !ruby/object:Api::OpAsync::Operation path: 'name' full_url: 'https://{{location}}-dialogflow.googleapis.com/v3/{{op_id}}' @@ -32,7 +33,6 @@ async: !ruby/object:Api::OpAsync timeouts: !ruby/object:Api::Timeouts insert_minutes: 60 update_minutes: 60 - delete_minutes: 60 result: !ruby/object:Api::OpAsync::Result path: 'response' resource_inside_response: true @@ -45,7 +45,6 @@ async: !ruby/object:Api::OpAsync error: !ruby/object:Api::OpAsync::Error path: 'error' message: 'message' -exclude_resource: true timeouts: !ruby/object:Api::Timeouts insert_minutes: 40 update_minutes: 40 @@ -55,12 +54,20 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode pre_update: templates/terraform/pre_create/dialogflow_set_location.go.erb pre_delete: templates/terraform/pre_create/dialogflow_set_location.go.erb pre_read: templates/terraform/pre_create/dialogflow_set_location.go.erb +custom_diff: + - 'tpgresource.DefaultProviderProject' examples: - !ruby/object:Provider::Terraform::Examples name: 'dialogflowcx_environment_full' primary_resource_id: 'development' vars: agent_name: 
'dialogflowcx-agent' + - !ruby/object:Provider::Terraform::Examples + skip_docs: true + name: 'dialogflowcx_environment_regional' + primary_resource_id: 'development' + vars: + agent_name: 'issue-12880' skip_sweeper: true id_format: '{{parent}}/environments/{{name}}' import_format: ['{{parent}}/environments/{{name}}'] diff --git a/mmv1/products/dialogflowcx/Flow.yaml b/mmv1/products/dialogflowcx/Flow.yaml index 883a76e67d36..061b2e2b222f 100644 --- a/mmv1/products/dialogflowcx/Flow.yaml +++ b/mmv1/products/dialogflowcx/Flow.yaml @@ -27,16 +27,37 @@ timeouts: !ruby/object:Api::Timeouts update_minutes: 40 custom_code: !ruby/object:Provider::Terraform::CustomCode custom_import: templates/terraform/custom_import/dialogflowcx_flow.go.erb - pre_create: templates/terraform/pre_create/dialogflow_set_location.go.erb + pre_create: templates/terraform/pre_create/dialogflowcx_set_location_skip_default_obj.go.erb pre_update: templates/terraform/pre_create/dialogflow_set_location.go.erb - pre_delete: templates/terraform/pre_create/dialogflow_set_location.go.erb + pre_delete: templates/terraform/pre_delete/dialogflowcx_set_location_skip_default_obj.go.erb pre_read: templates/terraform/pre_create/dialogflow_set_location.go.erb +virtual_fields: + - !ruby/object:Api::Type::Boolean + name: is_default_start_flow + immutable: true + description: | + Marks this as the [Default Start Flow](https://cloud.google.com/dialogflow/cx/docs/concept/flow#start) for an agent. When you create an agent, the Default Start Flow is created automatically. + The Default Start Flow cannot be deleted; deleting the `google_dialogflow_cx_flow` resource does nothing to the underlying GCP resources. + + ~> Avoid having multiple `google_dialogflow_cx_flow` resources linked to the same agent with `is_default_start_flow = true` because they will compete to control a single Default Start Flow resource in GCP. 
examples: + - !ruby/object:Provider::Terraform::Examples + name: 'dialogflowcx_flow_basic' + primary_resource_id: 'basic_flow' + vars: + agent_name: 'dialogflowcx-agent' - !ruby/object:Provider::Terraform::Examples name: 'dialogflowcx_flow_full' primary_resource_id: 'basic_flow' vars: agent_name: 'dialogflowcx-agent' + bucket_name: 'dialogflowcx-bucket' + - !ruby/object:Provider::Terraform::Examples + skip_docs: true + name: 'dialogflowcx_flow_default_start_flow' + primary_resource_id: 'default_start_flow' + vars: + agent_name: 'dialogflowcx-agent' skip_sweeper: true id_format: '{{parent}}/flows/{{name}}' import_format: ['{{parent}}/flows/{{name}}'] @@ -538,3 +559,42 @@ properties: values: - :MODEL_TRAINING_MODE_AUTOMATIC - :MODEL_TRAINING_MODE_MANUAL + - !ruby/object:Api::Type::NestedObject + name: 'advancedSettings' + description: | + Hierarchical advanced settings for this flow. The settings exposed at the lower level overrides the settings exposed at the higher level. + Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'audioExportGcsDestination' + description: | + If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: + * Agent level + * Flow level + properties: + - !ruby/object:Api::Type::String + name: 'uri' + description: | + The Google Cloud Storage URI for the exported objects. Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation. + Format: gs://bucket/object-name-or-prefix + - !ruby/object:Api::Type::NestedObject + name: 'dtmfSettings' + description: | + Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. 
Exposed at the following levels: + * Agent level + * Flow level + * Page level + * Parameter level + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). + - !ruby/object:Api::Type::Integer + name: 'maxDigits' + description: | + Max length of DTMF digits. + - !ruby/object:Api::Type::String + name: 'finishDigit' + description: | + The digit that terminates a DTMF digit sequence. diff --git a/mmv1/products/dialogflowcx/Intent.yaml b/mmv1/products/dialogflowcx/Intent.yaml index caf8c2dd32d7..3a287a4f5b07 100644 --- a/mmv1/products/dialogflowcx/Intent.yaml +++ b/mmv1/products/dialogflowcx/Intent.yaml @@ -27,16 +27,45 @@ timeouts: !ruby/object:Api::Timeouts update_minutes: 40 custom_code: !ruby/object:Provider::Terraform::CustomCode custom_import: templates/terraform/custom_import/dialogflowcx_intent.go.erb - pre_create: templates/terraform/pre_create/dialogflow_set_location.go.erb + pre_create: templates/terraform/pre_create/dialogflowcx_set_location_skip_default_obj.go.erb pre_update: templates/terraform/pre_create/dialogflow_set_location.go.erb - pre_delete: templates/terraform/pre_create/dialogflow_set_location.go.erb + pre_delete: templates/terraform/pre_delete/dialogflowcx_set_location_skip_default_obj.go.erb pre_read: templates/terraform/pre_create/dialogflow_set_location.go.erb +virtual_fields: + - !ruby/object:Api::Type::Boolean + name: is_default_welcome_intent + immutable: true + description: | + Marks this as the [Default Welcome Intent](https://cloud.google.com/dialogflow/cx/docs/concept/intent#welcome) for an agent. 
When you create an agent, a Default Welcome Intent is created automatically. + The Default Welcome Intent cannot be deleted; deleting the `google_dialogflow_cx_intent` resource does nothing to the underlying GCP resources. + + ~> Avoid having multiple `google_dialogflow_cx_intent` resources linked to the same agent with `is_default_welcome_intent = true` because they will compete to control a single Default Welcome Intent resource in GCP. + - !ruby/object:Api::Type::Boolean + name: is_default_negative_intent + immutable: true + description: | + Marks this as the [Default Negative Intent](https://cloud.google.com/dialogflow/cx/docs/concept/intent#negative) for an agent. When you create an agent, a Default Negative Intent is created automatically. + The Default Negative Intent cannot be deleted; deleting the `google_dialogflow_cx_intent` resource does nothing to the underlying GCP resources. + + ~> Avoid having multiple `google_dialogflow_cx_intent` resources linked to the same agent with `is_default_negative_intent = true` because they will compete to control a single Default Negative Intent resource in GCP. examples: - !ruby/object:Provider::Terraform::Examples name: 'dialogflowcx_intent_full' primary_resource_id: 'basic_intent' vars: agent_name: 'dialogflowcx-agent' + - !ruby/object:Provider::Terraform::Examples + skip_docs: true + name: 'dialogflowcx_intent_default_negative_intent' + primary_resource_id: 'default_negative_intent' + vars: + agent_name: 'dialogflowcx-agent' + - !ruby/object:Provider::Terraform::Examples + skip_docs: true + name: 'dialogflowcx_intent_default_welcome_intent' + primary_resource_id: 'default_welcome_intent' + vars: + agent_name: 'dialogflowcx-agent' skip_sweeper: true id_format: '{{parent}}/intents/{{name}}' import_format: ['{{parent}}/intents/{{name}}'] @@ -144,7 +173,8 @@ properties: description: | Indicates whether this is a fallback intent. 
Currently only default fallback intent is allowed in the agent, which is added upon agent creation. Adding training phrases to fallback intent is useful in the case of requests that are mistakenly matched, since training phrases assigned to fallback intents act as negative examples that triggers no-match event. - - !ruby/object:Api::Type::KeyValuePairs + To manage the fallback intent, set `is_default_negative_intent = true` + - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: | The key/value metadata to label an intent. Labels can contain lowercase letters, digits and the symbols '-' and '_'. International characters are allowed, including letters from unicase alphabets. Keys must start with a letter. Keys and values can be no longer than 63 characters and no more than 128 bytes. diff --git a/mmv1/products/dialogflowcx/Page.yaml b/mmv1/products/dialogflowcx/Page.yaml index 3ce813fad091..8081fc155667 100644 --- a/mmv1/products/dialogflowcx/Page.yaml +++ b/mmv1/products/dialogflowcx/Page.yaml @@ -703,6 +703,33 @@ properties: description: | Indicates whether the parameter content should be redacted in log. If redaction is enabled, the parameter content will be replaced by parameter name during logging. Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled. + - !ruby/object:Api::Type::NestedObject + name: 'advancedSettings' + description: | + Hierarchical advanced settings for this parameter. The settings exposed at the lower level overrides the settings exposed at the higher level. + Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dtmfSettings' + description: | + Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. 
Exposed at the following levels: + * Agent level + * Flow level + * Page level + * Parameter level + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). + - !ruby/object:Api::Type::Integer + name: 'maxDigits' + description: | + Max length of DTMF digits. + - !ruby/object:Api::Type::String + name: 'finishDigit' + description: | + The digit that terminates a DTMF digit sequence. - !ruby/object:Api::Type::Array name: 'transitionRouteGroups' description: | @@ -1132,3 +1159,30 @@ properties: description: | The target flow to transition to. Format: projects//locations//agents//flows/. + - !ruby/object:Api::Type::NestedObject + name: 'advancedSettings' + description: | + Hierarchical advanced settings for this page. The settings exposed at the lower level overrides the settings exposed at the higher level. + Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'dtmfSettings' + description: | + Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: + * Agent level + * Flow level + * Page level + * Parameter level + properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + description: | + If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. 
a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). + - !ruby/object:Api::Type::Integer + name: 'maxDigits' + description: | + Max length of DTMF digits. + - !ruby/object:Api::Type::String + name: 'finishDigit' + description: | + The digit that terminates a DTMF digit sequence. diff --git a/mmv1/products/dialogflowcx/SecuritySettings.yaml b/mmv1/products/dialogflowcx/SecuritySettings.yaml new file mode 100644 index 000000000000..1c4103c725d4 --- /dev/null +++ b/mmv1/products/dialogflowcx/SecuritySettings.yaml @@ -0,0 +1,162 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'SecuritySettings' +base_url: 'projects/{{project}}/locations/{{location}}/securitySettings' +update_verb: :PATCH +update_mask: true +description: | + Represents the settings related to security issues, such as data redaction and data retention. It may take hours for updates on the settings to propagate to all the related components and take effect. + Multiple security settings can be configured in each location. Each agent can specify the security settings to apply, and each setting can be applied to multiple agents in the same project and location. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' + api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.securitySettings' +timeouts: !ruby/object:Api::Timeouts + insert_minutes: 40 + update_minutes: 40 +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'dialogflowcx_security_settings_basic' + primary_resource_id: 'basic_security_settings' + vars: + settings_name: 'dialogflowcx-security-settings' + - !ruby/object:Provider::Terraform::Examples + name: 'dialogflowcx_security_settings_full' + primary_resource_id: 'basic_security_settings' + vars: + inspect_name: 'dialogflowcx-inspect-template' + deidentify_name: 'dialogflowcx-deidentify-template' + settings_name: 'dialogflowcx-security-settings' + bucket_name: 'dialogflowcx-bucket' + test_env_vars: + project: :PROJECT_NAME +id_format: 'projects/{{project}}/locations/{{location}}/securitySettings/{{name}}' +import_format: ['projects/{{project}}/locations/{{location}}/securitySettings/{{name}}'] +properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + description: | + The unique identifier of the settings. + Format: projects//locations//securitySettings/. + custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb + - !ruby/object:Api::Type::String + name: 'location' + description: | + The location these settings are located in. Settings can only be applied to an agent in the same location. + See [Available Regions](https://cloud.google.com/dialogflow/cx/docs/concept/region#avail) for a list of supported locations. + required: true + immutable: true + url_param_only: true + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The human-readable name of the security settings, unique within the location. 
+ required: true + - !ruby/object:Api::Type::Enum + name: 'redactionStrategy' + description: | + Defines how we redact data. If not set, defaults to not redacting. + * REDACT_WITH_SERVICE: Call redaction service to clean up the data to be persisted. + values: + - :REDACT_WITH_SERVICE + - !ruby/object:Api::Type::Enum + name: 'redactionScope' + description: | + Defines what types of data to redact. If not set, defaults to not redacting any kind of data. + * REDACT_DISK_STORAGE: On data to be written to disk or similar devices that are capable of holding data even if power is disconnected. This includes data that are temporarily saved on disk. + values: + - :REDACT_DISK_STORAGE + - !ruby/object:Api::Type::String + name: 'inspectTemplate' + description: | + [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. + Note: inspectTemplate must be located in the same region as the SecuritySettings. + Format: projects//locations//inspectTemplates/