diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 736d19f894..b21f02d862 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -1822,6 +1822,7 @@ func getCurrentPodNames(ctx context.Context, clientSet clientset.Interface, name
 	names := []string{}
 	for _, item := range podList.Items {
+		fmt.Printf("pod: %v, status: %v, deletionGracePeriodSeconds: %v\n", item.Name, item.Status.Phase, item.DeletionGracePeriodSeconds)
 		names = append(names, item.Name)
 	}
 	return names
diff --git a/test/e2e/e2e_toomanyrestarts_test.go b/test/e2e/e2e_toomanyrestarts_test.go
index e48bb4413d..2e30db94a2 100644
--- a/test/e2e/e2e_toomanyrestarts_test.go
+++ b/test/e2e/e2e_toomanyrestarts_test.go
@@ -141,6 +141,12 @@ func TestTooManyRestarts(t *testing.T) {
 			policy:                  tooManyRestartsPolicy(testNamespace.Name, 3, true, 0),
 			expectedEvictedPodCount: 4,
 		},
+		{
+			name:                    "test-one-evictions-use-gracePeriodSeconds",
+			policy:                  tooManyRestartsPolicy(testNamespace.Name, 3, true, 15),
+			enableGracePeriod:       true,
+			expectedEvictedPodCount: 4,
+		},
 	}
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
@@ -198,12 +204,43 @@ func TestTooManyRestarts(t *testing.T) {
 			if len(deschedulerPods) != 0 {
 				deschedulerPodName = deschedulerPods[0].Name
 			}
+
+			// Check if grace period is enabled and wait accordingly.
+			if tc.enableGracePeriod {
+				// Ensure no pods are evicted during the grace period.
+				// We wait 12 seconds, deliberately less than the full
+				// 15-second grace period: polling right up to the boundary
+				// would risk flaky failures, so we only assert that no pod
+				// is evicted within a window safely inside the grace period.
+				duration := 12 * time.Second
+				t.Logf("Waiting for grace period of %v", duration)
+				ctx, cancel := context.WithTimeout(context.Background(), duration)
+				defer cancel()
+				var gracePeriodCheck bool
+				for !gracePeriodCheck {
+					select {
+					case <-ctx.Done():
+						t.Logf("Grace period timeout reached.")
+						gracePeriodCheck = true
+					default:
+						currentRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
+						t.Logf("preRunNames: %v, currentRunNames: %v\n", preRunNames.List(), currentRunNames.List())
+
+						// Check if any pods were evicted.
+						if diff := preRunNames.Len() - currentRunNames.Len(); diff > 0 {
+							t.Fatalf("Expected no pods to be evicted, but found %d pods evicted.", diff)
+						}
+						<-time.After(2 * time.Second)
+					}
+				}
+			}
 
 			// Run RemovePodsHavingTooManyRestarts strategy
-			if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 50*time.Second, true, func(ctx context.Context) (bool, error) {
-				currentRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
-				actualEvictedPod := preRunNames.Difference(currentRunNames)
+			if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 120*time.Second, true, func(ctx context.Context) (bool, error) {
+				currentRunNames1 := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
+				actualEvictedPod := preRunNames.Difference(currentRunNames1)
 				actualEvictedPodCount := uint(actualEvictedPod.Len())
-				t.Logf("preRunNames: %v, currentRunNames: %v, actualEvictedPodCount: %v\n", preRunNames.List(), currentRunNames.List(), actualEvictedPodCount)
+				t.Logf("preRunNames: %v, currentRunNames: %v, actualEvictedPodCount: %v\n", preRunNames.List(), currentRunNames1.List(), actualEvictedPodCount)
 				if actualEvictedPodCount < tc.expectedEvictedPodCount {
 					t.Logf("Expecting %v number of pods evicted, got %v instead", tc.expectedEvictedPodCount, actualEvictedPodCount)
 					return false, nil
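Note for reviewers: the `tc.enableGracePeriod` check relies on an `enableGracePeriod bool` field in the test table struct, presumably added in a hunk not shown above. For reference, here is a minimal standalone sketch of the pattern the new block implements: poll at a fixed interval until the grace window elapses, failing fast if the guarded condition is ever violated. The `assertNoneDuringGracePeriod` helper, the `check` callback, and the `main` scaffolding are illustrative stand-ins, not code from this PR; the test itself compares `preRunNames` against the current pod names instead.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// assertNoneDuringGracePeriod polls check every interval until gracePeriod
// elapses. It returns nil if the grace period passes without check reporting
// a violation, mirroring the select loop added in TestTooManyRestarts.
// (Hypothetical helper for illustration only.)
func assertNoneDuringGracePeriod(gracePeriod, interval time.Duration, check func() error) error {
	ctx, cancel := context.WithTimeout(context.Background(), gracePeriod)
	defer cancel()
	for {
		select {
		case <-ctx.Done():
			return nil // grace period elapsed with no violation
		case <-time.After(interval):
			if err := check(); err != nil {
				return err
			}
		}
	}
}

func main() {
	evicted := 0 // stand-in for preRunNames.Len() - currentRunNames.Len()
	err := assertNoneDuringGracePeriod(12*time.Second, 2*time.Second, func() error {
		if evicted > 0 {
			return fmt.Errorf("expected no pods to be evicted, found %d", evicted)
		}
		return nil
	})
	fmt.Println("grace period check:", err) // prints "grace period check: <nil>"
}
```

One design note: putting `time.After` inside the `select` (rather than an unconditional `<-time.After(...)` in a `default` branch, as the PR does) keeps the loop responsive to the context deadline; for a 2-second interval the two are equivalent in practice.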