From 1f8d47604935e5c3f16389dc9d4437c6e9bc33a3 Mon Sep 17 00:00:00 2001
From: stevenhorsman
Date: Thu, 26 Sep 2024 11:27:20 +0100
Subject: [PATCH 01/11] test/e2e: Refactor out common patterns

We have some repeated patterns of looking through a list of pods
to find the one matching our expected name and then extracting
some information from it. Rather than repeat this in multiple
places, we can refactor this pattern out into assessment_helpers
and re-use it.

Signed-off-by: stevenhorsman
---
 .../test/e2e/assessment_helpers.go | 55 +++++++++----------
 1 file changed, 26 insertions(+), 29 deletions(-)

diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
index dded262f5..bd02de667 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
@@ -199,30 +199,19 @@ func GetPodLog(ctx context.Context, client klient.Client, pod v1.Pod) (string, e
 	if err != nil {
 		return "", err
 	}
-	return buf.String(), nil
+	return strings.TrimSpace(buf.String()), nil
 }

-func ComparePodLogString(ctx context.Context, client klient.Client, customPod v1.Pod, expectedPodlogString string) (string, error) {
-	podLogString := ""
-	var podlist v1.PodList
-	if err := client.Resources(customPod.Namespace).List(ctx, &podlist); err != nil {
-		return podLogString, err
-	}
-	//adding sleep time to intialize container and ready for logging
+func ComparePodLogString(ctx context.Context, client klient.Client, customPod v1.Pod, expectedPodLogString string) (string, error) {
+	//adding sleep time to initialize container and ready for logging
 	time.Sleep(5 * time.Second)
-	for _, pod := range podlist.Items {
-		if pod.ObjectMeta.Name == customPod.Name {
-			var err error
-			podLogString, err = GetPodLog(ctx, client, pod)
-			if err != nil {
-				return "", err
-			}
-			podLogString = strings.TrimSpace(podLogString)
-			break
-		}
+
+	podLogString, err := getStringFromPod(ctx, client, customPod, GetPodLog)
+	if err != nil {
+		return "", err
 	}

-	if !strings.Contains(podLogString, expectedPodlogString) {
+	if !strings.Contains(podLogString, expectedPodLogString) {
 		return podLogString, errors.New("Error: Pod Log doesn't contain Expected String")
 	}

@@ -230,17 +219,10 @@ func ComparePodLogString(ctx context.Context, client klient.Client, customPod v1
 }

 func GetNodeNameFromPod(ctx context.Context, client klient.Client, customPod v1.Pod) (string, error) {
-	var podlist v1.PodList
-	if err := client.Resources(customPod.Namespace).List(ctx, &podlist); err != nil {
-		return "", err
+	var getNodeName = func(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) {
+		return pod.Spec.NodeName, nil
 	}
-
-	for _, pod := range podlist.Items {
-		if pod.ObjectMeta.Name == customPod.Name {
-			return pod.Spec.NodeName, nil
-		}
-	}
-	return "", errors.New("Pod wasn't found")
+	return getStringFromPod(ctx, client, customPod, getNodeName)
 }

 func GetSuccessfulAndErroredPods(ctx context.Context, t *testing.T, client klient.Client, job batchv1.Job) (int, int, string, error) {
@@ -564,3 +546,18 @@ func GetPodNamesByLabel(ctx context.Context, client klient.Client, t *testing.T,
 	return pods, nil
 }
+
+type podToString func(context.Context, klient.Client, v1.Pod) (string, error)
+
+func getStringFromPod(ctx context.Context, client klient.Client, pod v1.Pod, fn podToString) (string, error) {
+	var podlist v1.PodList
+	if err := client.Resources(pod.Namespace).List(ctx, &podlist); err != nil {
+		return "", err
+	}
+	for _, podItem := range 
podlist.Items {
+		if podItem.ObjectMeta.Name == pod.Name {
+			return fn(ctx, client, podItem)
+		}
+	}
+	return "", fmt.Errorf("no pod matching %v, was found", pod)
+}

From fd57b7e21cd174cc19022fd1a231e8dfaba4bfdc Mon Sep 17 00:00:00 2001
From: stevenhorsman
Date: Thu, 26 Sep 2024 12:22:25 +0100
Subject: [PATCH 02/11] test/e2e: Rework PodEventExtractor

I used the `PodEventExtractor` method to compare against an expected
error message, but didn't realise at the time that it just watched
for the first warning message and returned that. We then use this
output in `assessment_runner` to see if our expected error
description is there.

Note: As we use it in the `Assess` phase of testing, and the pod
should be in the expected state after the `WithSetup`, we should be
able to replace the need for the watch and just get a list of the
events.

- Rename to clarify that only warning (and error) events are
considered, and create a `ComparePodEventWarningDescriptions` method
in `assessment_helpers` to help simplify the logic in
`assessment_runner` and process all warning events looking for a
description match, not just the first match. If no error was
expected, then mark the test as failing.
- In some of the tests that expect failure events, as we are no
longer watching for the first error, we should wait for the pod to
have gone into the failed state before processing the events; in
others we add a retry to check for the message.

Based on the above logic we can simplify the instance type testing.

Signed-off-by: stevenhorsman
---
 .../test/e2e/assessment_helpers.go | 81 +++++++----
 .../test/e2e/assessment_runner.go | 130 +++++++-----------
 .../test/e2e/common_suite.go | 98 ++-----------
 3 files changed, 108 insertions(+), 201 deletions(-)

diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
index bd02de667..fcd66b278 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
@@ -46,12 +46,6 @@ func timeExtractor(log string) (string, error) {
 	return matchString[0], nil
 }

-type PodEvents struct {
-	EventType string
-	EventDescription string
-	EventReason string
-}
-
 func NewTestCase(t *testing.T, e env.Environment, testName string, assert CloudAssert, assessMessage string) *TestCase {
 	testCase := &TestCase{
 		testing: t,
@@ -79,29 +73,6 @@ func NewExtraPod(namespace string, podName string, containerName string, imageNa
 	return extPod
 }

-func PodEventExtractor(ctx context.Context, client klient.Client, pod v1.Pod) (*PodEvents, error) {
-	clientset, err := kubernetes.NewForConfig(client.RESTConfig())
-	if err != nil {
-		return nil, err
-	}
-	watcher, err := clientset.CoreV1().Events(pod.Namespace).Watch(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s", pod.Name)})
-	if err != nil {
-		return nil, err
-	}
-	defer watcher.Stop()
-	for event := range watcher.ResultChan() {
-
-		if event.Object.(*v1.Event).Type == v1.EventTypeWarning {
-			var newPodEvents PodEvents
-			newPodEvents.EventType = event.Object.(*v1.Event).Type
-			newPodEvents.EventDescription = event.Object.(*v1.Event).Message
-			newPodEvents.EventType = event.Object.(*v1.Event).Reason
-			return &newPodEvents, nil
-		}
-	}
-	return nil, errors.New("No Events Found in PodVM")
-}
-
 func WatchImagePullTime(ctx context.Context, client klient.Client, caaPod v1.Pod, pod v1.Pod) (string, error) {
 	pullingtime := ""
 	var startTime, endTime time.Time

@@ -218,6 +189,58 @@ func ComparePodLogString(ctx context.Context, client klient.Client, customPod v1
 	return podLogString, nil
 }

+// Note: there are currently two event types: Normal and Warning, so Warning includes errors
+func GetPodEventWarningDescriptions(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) {
+	clientset, err := kubernetes.NewForConfig(client.RESTConfig())
+	if err != nil {
+		return "", err
+	}
+
+	events, err := clientset.CoreV1().Events(pod.Namespace).List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s", pod.Name)})
+	if err != nil {
+		return "", err
+	}
+
+	var descriptionsBuilder strings.Builder
+	for _, event := range events.Items {
+		if event.Type == v1.EventTypeWarning {
+			descriptionsBuilder.WriteString(event.Message)
+		}
+	}
+	return descriptionsBuilder.String(), nil
+}
+
+// This function takes an expected pod event "warning" string (note warning also covers errors) and checks to see if it
+// shows up in the event log of the pod. Some pods error in failed state, so can be immediately checked, others fail
+// in waiting state (e.g. ImagePullBackoff errors), so we need to poll for errors showing up on these pods
+func ComparePodEventWarningDescriptions(ctx context.Context, t *testing.T, client klient.Client, pod v1.Pod, expectedPodEvent string) error {
+	retries := 1
+	delay := 10 * time.Second
+
+	if pod.Status.Phase != v1.PodFailed {
+		// If not failed state we might have to wait/retry until the error happens
+		retries = int(WAIT_POD_RUNNING_TIMEOUT / delay)
+	}
+
+	var err error = nil
+	for retries > 0 {
+		podEventsDescriptions, podErr := getStringFromPod(ctx, client, pod, GetPodEventWarningDescriptions)
+		if podErr != nil {
+			return podErr
+		}
+
+		t.Logf("podEvents: %s\n", podEventsDescriptions)
+		if !strings.Contains(podEventsDescriptions, expectedPodEvent) {
+			err = fmt.Errorf("error: Pod Events don't contain Expected String %s", expectedPodEvent)
+		} else {
+			return nil
+		}
+		retries--
+		time.Sleep(delay)
+	}
+	return err
+}
+
 func GetNodeNameFromPod(ctx context.Context, client klient.Client, customPod v1.Pod) (string, error) {
 	var getNodeName = func(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) {
 		return pod.Spec.NodeName, nil
diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
index 1056861ed..07de5e3bd 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
@@ -6,7 +6,6 @@ package e2e
 import (
 	"bytes"
 	"context"
-	"errors"
 	"fmt"
 	"os"
 	"strings"
@@ -44,41 +43,35 @@ type ExtraPod struct {
 	pod *v1.Pod
 	imagePullTimer bool
 	expectedPodLogString string
-	testInstanceTypes InstanceValidatorFunctions
 	podState v1.PodPhase
 	testCommands []TestCommand
 }

-type InstanceValidatorFunctions struct {
-	testSuccessfn func(instance string) bool
-	testFailurefn func(error error) bool
-}
-
 type TestCase struct {
-	testing *testing.T
-	testEnv env.Environment
-	testName string
-	assert CloudAssert
-	assessMessage string
-	pod *v1.Pod
-	extraPods []*ExtraPod
-	configMap *v1.ConfigMap
-	secret *v1.Secret
-	extraSecrets []*v1.Secret
-	pvc *v1.PersistentVolumeClaim
-	job *batchv1.Job
-	service *v1.Service
-	testCommands []TestCommand
-	expectedPodLogString string
-	expectedPodDescribe string
-	podState v1.PodPhase
-	imagePullTimer bool
-	noAuthJson bool
-	deletionWithin time.Duration
-	testInstanceTypes InstanceValidatorFunctions
-	isNydusSnapshotter bool
-	FailReason string
-	alternateImageName string
+	testing *testing.T
+	testEnv env.Environment
+	testName string
+	
assert CloudAssert + assessMessage string + pod *v1.Pod + extraPods []*ExtraPod + configMap *v1.ConfigMap + secret *v1.Secret + extraSecrets []*v1.Secret + pvc *v1.PersistentVolumeClaim + job *batchv1.Job + service *v1.Service + testCommands []TestCommand + expectedPodLogString string + expectedPodEventErrorString string + podState v1.PodPhase + imagePullTimer bool + noAuthJson bool + deletionWithin time.Duration + expectedInstanceType string + isNydusSnapshotter bool + FailReason string + alternateImageName string } func (tc *TestCase) WithConfigMap(configMap *v1.ConfigMap) *TestCase { @@ -131,8 +124,8 @@ func (tc *TestCase) WithTestCommands(TestCommands []TestCommand) *TestCase { return tc } -func (tc *TestCase) WithInstanceTypes(testInstanceTypes InstanceValidatorFunctions) *TestCase { - tc.testInstanceTypes = testInstanceTypes +func (tc *TestCase) WithExpectedInstanceType(expectedInstanceType string) *TestCase { + tc.expectedInstanceType = expectedInstanceType return tc } @@ -151,8 +144,8 @@ func (tc *TestCase) WithExpectedPodLogString(expectedPodLogString string) *TestC return tc } -func (tc *TestCase) WithExpectedPodDescribe(expectedPodDescribe string) *TestCase { - tc.expectedPodDescribe = expectedPodDescribe +func (tc *TestCase) WithExpectedPodEventError(expectedPodEventMessage string) *TestCase { + tc.expectedPodEventErrorString = expectedPodEventMessage return tc } @@ -372,56 +365,34 @@ func (tc *TestCase) Run() { t.Logf("Log output of peer pod:%s", LogString) } - if tc.expectedPodDescribe != "" { - if err := client.Resources(tc.pod.Namespace).List(ctx, &podlist); err != nil { + if tc.expectedPodEventErrorString != "" { + err := ComparePodEventWarningDescriptions(ctx, t, client, *tc.pod, tc.expectedPodEventErrorString) + if err != nil { t.Fatal(err) } - for _, podItem := range podlist.Items { - if podItem.ObjectMeta.Name == tc.pod.Name { - podEvent, err := PodEventExtractor(ctx, client, *tc.pod) - if err != nil { - t.Fatal(err) - } - t.Logf("podEvent: %+v\n", podEvent) - if strings.Contains(podEvent.EventDescription, tc.expectedPodDescribe) { - t.Logf("Output Log from Pod: %s", podEvent) - } else { - t.Errorf("Job Created pod with Invalid log") - } - break - } else { - t.Fatal("Pod Not Found...") - } + } else { + // There shouldn't have been any pod event warnings/errors + warnings, err := GetPodEventWarningDescriptions(ctx, client, *tc.pod) + if err != nil { + t.Fatal(err) + } + if warnings != "" { + t.Fatal(fmt.Errorf("unexpected warning/error event(s): %s", warnings)) } } - if tc.testInstanceTypes.testSuccessfn != nil && tc.testInstanceTypes.testFailurefn != nil { + if tc.expectedInstanceType != "" { if err := client.Resources(tc.pod.Namespace).List(ctx, &podlist); err != nil { t.Fatal(err) } - for _, podItem := range podlist.Items { if podItem.ObjectMeta.Name == tc.pod.Name { - profile, error := tc.assert.GetInstanceType(t, tc.pod.Name) - if error != nil { - if error.Error() == "Failed to Create PodVM Instance" { - podEvent, err := PodEventExtractor(ctx, client, *tc.pod) - if err != nil { - t.Fatal(err) - } - if !tc.testInstanceTypes.testFailurefn(errors.New(podEvent.EventDescription)) { - t.Fatal(fmt.Errorf("Pod Failed to execute expected error message %v", error.Error())) - } - } else { - t.Fatal(error) - } - + profile, err := tc.assert.GetInstanceType(t, tc.pod.Name) + if err != nil { + t.Fatal(err) } - if profile != "" { - t.Logf("PodVM Created with Instance Type: %v", profile) - if !tc.testInstanceTypes.testSuccessfn(profile) { - t.Fatal(fmt.Errorf("PodVM Created with 
Different Instance Type %v", profile)) - } + if profile != tc.expectedInstanceType { + t.Errorf("PodVM was expected to have instance type %s, but has %s", tc.expectedInstanceType, profile) } break } else { @@ -457,12 +428,7 @@ func (tc *TestCase) Run() { if tc.podState != v1.PodRunning && tc.podState != v1.PodSucceeded { profile, error := tc.assert.GetInstanceType(t, tc.pod.Name) if error != nil { - if error.Error() == "Failed to Create PodVM Instance" { - _, err := PodEventExtractor(ctx, client, *tc.pod) - if err != nil { - t.Fatal(err) - } - } else { + if error.Error() != "Failed to Create PodVM Instance" { t.Fatal(error) } } else if profile != "" { @@ -534,10 +500,6 @@ func (tc *TestCase) Run() { } t.Logf("Log output of peer pod:%s", LogString) } - if extraPod.testInstanceTypes.testSuccessfn != nil && extraPod.testInstanceTypes.testFailurefn != nil { - // TBD - t.Fatal("Error: testInstanceTypes hasn't been implemented in extraPods. Please implement assess for function testInstanceTypes.") - } if extraPod.podState == v1.PodRunning { if len(extraPod.testCommands) > 0 { logString, err := AssessPodTestCommands(ctx, client, extraPod.pod, extraPod.testCommands) diff --git a/src/cloud-api-adaptor/test/e2e/common_suite.go b/src/cloud-api-adaptor/test/e2e/common_suite.go index 85906acde..5e99778d1 100644 --- a/src/cloud-api-adaptor/test/e2e/common_suite.go +++ b/src/cloud-api-adaptor/test/e2e/common_suite.go @@ -255,7 +255,7 @@ func DoTestCreatePeerPodWithAuthenticatedImageWithoutCredentials(t *testing.T, e imageName := os.Getenv("AUTHENTICATED_REGISTRY_IMAGE") pod := NewPod(E2eNamespace, podName, podName, imageName, WithRestartPolicy(v1.RestartPolicyNever)) expectedErrorString := "401 UNAUTHORIZED" - NewTestCase(t, e, "InvalidAuthImagePeerPod", assert, "Peer pod with Authenticated Image without Credentials has been created").WithPod(pod).WithNoAuthJson().WithExpectedPodDescribe(expectedErrorString).WithCustomPodState(v1.PodPending).Run() + NewTestCase(t, e, "InvalidAuthImagePeerPod", assert, "Peer pod with Authenticated Image without Credentials has been created").WithPod(pod).WithNoAuthJson().WithExpectedPodEventError(expectedErrorString).WithCustomPodState(v1.PodPending).Run() } func DoTestPodVMwithNoAnnotations(t *testing.T, e env.Environment, assert CloudAssert, expectedType string) { @@ -264,19 +264,7 @@ func DoTestPodVMwithNoAnnotations(t *testing.T, e env.Environment, assert CloudA containerName := "busybox" imageName := BUSYBOX_IMAGE pod := NewPod(E2eNamespace, podName, containerName, imageName, WithCommand([]string{"/bin/sh", "-c", "sleep 3600"})) - testInstanceTypes := InstanceValidatorFunctions{ - testSuccessfn: func(instance string) bool { - if instance == expectedType { - t.Logf("PodVM Created with %s Instance type successfully...", instance) - return true - } else { - t.Logf("Failed to Create PodVM with %s Instance type", expectedType) - return false - } - }, - testFailurefn: IsErrorEmpty, - } - NewTestCase(t, e, "PodVMWithNoAnnotations", assert, "PodVM with No Annotation is created").WithPod(pod).WithInstanceTypes(testInstanceTypes).Run() + NewTestCase(t, e, "PodVMWithNoAnnotations", assert, "PodVM with No Annotation is created").WithPod(pod).WithExpectedInstanceType(expectedType).Run() } func DoTestPodVMwithAnnotationsInstanceType(t *testing.T, e env.Environment, assert CloudAssert, expectedType string) { @@ -287,20 +275,7 @@ func DoTestPodVMwithAnnotationsInstanceType(t *testing.T, e env.Environment, ass "io.katacontainers.config.hypervisor.machine_type": expectedType, } pod := 
NewPod(E2eNamespace, podName, containerName, imageName, WithCommand([]string{"/bin/sh", "-c", "sleep 3600"}), WithAnnotations(annotationData)) - - testInstanceTypes := InstanceValidatorFunctions{ - testSuccessfn: func(instance string) bool { - if instance == expectedType { - t.Logf("PodVM Created with %s Instance type successfully...", instance) - return true - } else { - t.Logf("Failed to Create PodVM with %s Instance type", expectedType) - return false - } - }, - testFailurefn: IsErrorEmpty, - } - NewTestCase(t, e, "PodVMwithAnnotationsInstanceType", assert, "PodVM with Annotation is created").WithPod(pod).WithInstanceTypes(testInstanceTypes).Run() + NewTestCase(t, e, "PodVMwithAnnotationsInstanceType", assert, "PodVM with Annotation is created").WithPod(pod).WithExpectedInstanceType(expectedType).Run() } func DoTestPodVMwithAnnotationsCPUMemory(t *testing.T, e env.Environment, assert CloudAssert, expectedType string) { @@ -312,98 +287,45 @@ func DoTestPodVMwithAnnotationsCPUMemory(t *testing.T, e env.Environment, assert "io.katacontainers.config.hypervisor.default_memory": "12288", } pod := NewPod(E2eNamespace, podName, containerName, imageName, WithCommand([]string{"/bin/sh", "-c", "sleep 3600"}), WithAnnotations(annotationData)) - - testInstanceTypes := InstanceValidatorFunctions{ - testSuccessfn: func(instance string) bool { - if instance == expectedType { - t.Logf("PodVM Created with %s Instance type successfully...", instance) - return true - } else { - t.Logf("Failed to Create PodVM with %s Instance type", expectedType) - return false - } - }, - testFailurefn: IsErrorEmpty, - } - NewTestCase(t, e, "PodVMwithAnnotationsCPUMemory", assert, "PodVM with Annotations CPU Memory is created").WithPod(pod).WithInstanceTypes(testInstanceTypes).Run() + NewTestCase(t, e, "PodVMwithAnnotationsCPUMemory", assert, "PodVM with Annotations CPU Memory is created").WithPod(pod).WithExpectedInstanceType(expectedType).Run() } func DoTestPodVMwithAnnotationsInvalidInstanceType(t *testing.T, e env.Environment, assert CloudAssert, expectedType string) { podName := "annotations-invalid-instance-type" containerName := "busybox" imageName := BUSYBOX_IMAGE - expectedErrorMessage := `requested instance type ("` + expectedType + `") is not part of supported instance types list` annotationData := map[string]string{ "io.katacontainers.config.hypervisor.machine_type": expectedType, } pod := NewPod(E2eNamespace, podName, containerName, imageName, WithCommand([]string{"/bin/sh", "-c", "sleep 3600"}), WithAnnotations(annotationData)) - testInstanceTypes := InstanceValidatorFunctions{ - testSuccessfn: IsStringEmpty, - testFailurefn: func(errorMsg error) bool { - if strings.Contains(errorMsg.Error(), expectedErrorMessage) { - t.Logf("Got Expected Error: %v", errorMsg.Error()) - return true - } else { - t.Logf("Failed to Get Expected Error: %v", errorMsg.Error()) - return false - } - }, - } - NewTestCase(t, e, "PodVMwithAnnotationsInvalidInstanceType", assert, "Failed to Create PodVM with Annotations Invalid InstanceType").WithPod(pod).WithInstanceTypes(testInstanceTypes).WithCustomPodState(v1.PodPending).Run() + expectedErrorMessage := `requested instance type ("` + expectedType + `") is not part of supported instance types list` + NewTestCase(t, e, "PodVMwithAnnotationsInvalidInstanceType", assert, "Failed to Create PodVM with Annotations Invalid InstanceType").WithPod(pod).WithExpectedPodEventError(expectedErrorMessage).WithCustomPodState(v1.PodFailed).Run() } func DoTestPodVMwithAnnotationsLargerMemory(t 
*testing.T, e env.Environment, assert CloudAssert) { podName := "annotations-too-big-mem" containerName := "busybox" imageName := BUSYBOX_IMAGE - expectedErrorMessage := "failed to get instance type based on vCPU and memory annotations: no instance type found for the given vcpus (2) and memory (18432)" annotationData := map[string]string{ "io.katacontainers.config.hypervisor.default_vcpus": "2", "io.katacontainers.config.hypervisor.default_memory": "18432", } pod := NewPod(E2eNamespace, podName, containerName, imageName, WithCommand([]string{"/bin/sh", "-c", "sleep 3600"}), WithAnnotations(annotationData)) - testInstanceTypes := InstanceValidatorFunctions{ - testSuccessfn: IsStringEmpty, - testFailurefn: func(errorMsg error) bool { - if strings.Contains(errorMsg.Error(), expectedErrorMessage) { - t.Logf("Got Expected Error: %v", errorMsg.Error()) - return true - } else { - t.Logf("Failed to Get Expected Error: %v", errorMsg.Error()) - return false - } - }, - } - NewTestCase(t, e, "PodVMwithAnnotationsLargerMemory", assert, "Failed to Create PodVM with Annotations Larger Memory").WithPod(pod).WithInstanceTypes(testInstanceTypes).WithCustomPodState(v1.PodPending).Run() + expectedErrorMessage := "failed to get instance type based on vCPU and memory annotations: no instance type found for the given vcpus (2) and memory (18432)" + NewTestCase(t, e, "PodVMwithAnnotationsLargerMemory", assert, "Failed to Create PodVM with Annotations Larger Memory").WithPod(pod).WithExpectedPodEventError(expectedErrorMessage).WithCustomPodState(v1.PodFailed).Run() } func DoTestPodVMwithAnnotationsLargerCPU(t *testing.T, e env.Environment, assert CloudAssert) { podName := "annotations-too-big-cpu" containerName := "busybox" imageName := BUSYBOX_IMAGE - expectedErrorMessage := []string{ - "no instance type found for the given vcpus (3) and memory (12288)", - "Number of cpus 3 specified in annotation default_vcpus is greater than the number of CPUs 2 on the system", - } annotationData := map[string]string{ "io.katacontainers.config.hypervisor.default_vcpus": "3", "io.katacontainers.config.hypervisor.default_memory": "12288", } pod := NewPod(E2eNamespace, podName, containerName, imageName, WithCommand([]string{"/bin/sh", "-c", "sleep 3600"}), WithAnnotations(annotationData)) - testInstanceTypes := InstanceValidatorFunctions{ - testSuccessfn: IsStringEmpty, - testFailurefn: func(errorMsg error) bool { - for _, i := range expectedErrorMessage { - if strings.Contains(errorMsg.Error(), i) { - t.Logf("Got Expected Error: %v", errorMsg.Error()) - return true - } - } - t.Logf("Failed to Get Expected Error: %v", errorMsg.Error()) - return false - }, - } - NewTestCase(t, e, "PodVMwithAnnotationsLargerCPU", assert, "Failed to Create PodVM with Annotations Larger CPU").WithPod(pod).WithInstanceTypes(testInstanceTypes).WithCustomPodState(v1.PodPending).Run() + expectedErrorMessage := "no instance type found for the given vcpus (3) and memory (12288)" + NewTestCase(t, e, "PodVMwithAnnotationsLargerCPU", assert, "Failed to Create PodVM with Annotations Larger CPU").WithPod(pod).WithExpectedPodEventError(expectedErrorMessage).WithCustomPodState(v1.PodFailed).Run() } func DoTestCreatePeerPodContainerWithValidAlternateImage(t *testing.T, e env.Environment, assert CloudAssert, alternateImageName string) { From 003edef5443dd2ab9b6703b83acb333d6f16dc67 Mon Sep 17 00:00:00 2001 From: stevenhorsman Date: Thu, 26 Sep 2024 13:57:05 +0100 Subject: [PATCH 03/11] test/e2e: InstanceType refactor Refactor out the instance type check to the 
assessment_helpers, to make the assessment_runner
flow easier to read

Signed-off-by: stevenhorsman
---
 .../test/e2e/assessment_helpers.go | 21 +++++++++++++++++++
 .../test/e2e/assessment_runner.go | 18 ++--------------
 .../test/e2e/common_suite.go | 15 +------------
 3 files changed, 24 insertions(+), 30 deletions(-)

diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
index fcd66b278..077d737c8 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
@@ -241,6 +241,27 @@
 	return err
 }

+func CompareInstanceType(ctx context.Context, t *testing.T, client klient.Client, pod v1.Pod, expectedInstanceType string, getInstanceTypeFn func(t *testing.T, podName string) (string, error)) error {
+	var podlist v1.PodList
+	if err := client.Resources(pod.Namespace).List(ctx, &podlist); err != nil {
+		return err
+	}
+	for _, podItem := range podlist.Items {
+		if podItem.ObjectMeta.Name == pod.Name {
+			instanceType, err := getInstanceTypeFn(t, pod.Name)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if instanceType == expectedInstanceType {
+				return nil
+			} else {
+				return fmt.Errorf("error: Pod instance type was %s, but we expected %s ", instanceType, expectedInstanceType)
+			}
+		}
+	}
+	return fmt.Errorf("no pod matching %v, was found", pod)
+}
+
 func GetNodeNameFromPod(ctx context.Context, client klient.Client, customPod v1.Pod) (string, error) {
 	var getNodeName = func(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) {
 		return pod.Spec.NodeName, nil
diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
index 07de5e3bd..31d929568 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
@@ -382,24 +382,10 @@ func (tc *TestCase) Run() {
 			}

 			if tc.expectedInstanceType != "" {
-				if err := client.Resources(tc.pod.Namespace).List(ctx, &podlist); err != nil {
+				err := CompareInstanceType(ctx, t, client, *tc.pod, tc.expectedInstanceType, tc.assert.GetInstanceType)
+				if err != nil {
 					t.Fatal(err)
 				}
-				for _, podItem := range podlist.Items {
-					if podItem.ObjectMeta.Name == tc.pod.Name {
-						profile, err := tc.assert.GetInstanceType(t, tc.pod.Name)
-						if err != nil {
-							t.Fatal(err)
-						}
-						if profile != tc.expectedInstanceType {
-							t.Errorf("PodVM was expected to have instance type %s, but has %s", tc.expectedInstanceType, profile)
-						}
-						break
-					} else {
-						t.Fatal("Pod Not Found...")
-					}
-				}
-			}

 			if tc.podState == v1.PodRunning {
diff --git a/src/cloud-api-adaptor/test/e2e/common_suite.go b/src/cloud-api-adaptor/test/e2e/common_suite.go
index 5e99778d1..f33009d1b 100644
--- a/src/cloud-api-adaptor/test/e2e/common_suite.go
+++ b/src/cloud-api-adaptor/test/e2e/common_suite.go
@@ -350,20 +350,7 @@ func DoTestCreatePeerPodContainerWithInvalidAlternateImage(t *testing.T, e env.E
 		"io.katacontainers.config.hypervisor.image": nonExistingImageName,
 	}
 	pod := NewPod(E2eNamespace, podName, containerName, imageName, WithCommand([]string{"/bin/sh", "-c", "sleep 3600"}), WithAnnotations(annotationData))
-
-	testInstanceTypes := InstanceValidatorFunctions{
-		testSuccessfn: IsStringEmpty,
-		testFailurefn: func(errorMsg error) bool {
-			if strings.Contains(errorMsg.Error(), expectedErrorMessage) {
-				t.Logf("Got Expected Error: %v", errorMsg.Error())
-				return true
-			} else {
-				
t.Logf("Failed to Get Expected Error: %v", errorMsg.Error()) - return false - } - }, - } - NewTestCase(t, e, "PodVMwithAnnotationsInvalidAlternateImage", assert, "Failed to Create PodVM with a non-existent image").WithPod(pod).WithInstanceTypes(testInstanceTypes).WithCustomPodState(v1.PodPending).Run() + NewTestCase(t, e, "PodVMwithAnnotationsInvalidAlternateImage", assert, "Failed to Create PodVM with a non-existent image").WithPod(pod).WithExpectedPodEventError(expectedErrorMessage).WithCustomPodState(v1.PodPending).Run() } func DoTestPodToServiceCommunication(t *testing.T, e env.Environment, assert CloudAssert) { From c6d937aa322b14f23638f26abaecd2ede627c542 Mon Sep 17 00:00:00 2001 From: stevenhorsman Date: Mon, 21 Oct 2024 10:00:08 +0100 Subject: [PATCH 04/11] test/e2e: Refactor out `VerifyAlternateImage` Refactor out the logic that checks the alternativeImage is used to make the assessment_runner flow cleaner Signed-off-by: stevenhorsman --- .../test/e2e/assessment_helpers.go | 20 +++++++++++++++++++ .../test/e2e/assessment_runner.go | 14 +------------ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go index 077d737c8..e43df124f 100644 --- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go +++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go @@ -262,6 +262,26 @@ func CompareInstanceType(ctx context.Context, t *testing.T, client klient.Client return fmt.Errorf("no pod matching %v, was found", pod) } +func VerifyAlternateImage(ctx context.Context, t *testing.T, client klient.Client, alternateImageName string) error { + var caaPod v1.Pod + caaPod.Namespace = "confidential-containers-system" + expectedSuccessMessage := "Choosing " + alternateImageName + + pods, err := GetPodNamesByLabel(ctx, client, t, caaPod.Namespace, "app", "cloud-api-adaptor") + if err != nil { + t.Fatal(err) + } + + caaPod.Name = pods.Items[0].Name + LogString, err := ComparePodLogString(ctx, client, caaPod, expectedSuccessMessage) + if err != nil { + t.Logf("Output:%s", LogString) + t.Fatal(err) + } + t.Logf("PodVM was brought up using the alternate PodVM image %s", alternateImageName) + return nil +} + func GetNodeNameFromPod(ctx context.Context, client klient.Client, customPod v1.Pod) (string, error) { var getNodeName = func(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) { return pod.Spec.NodeName, nil diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go index 31d929568..14ec43e07 100644 --- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go +++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go @@ -507,22 +507,10 @@ func (tc *TestCase) Run() { } if tc.alternateImageName != "" { - var caaPod v1.Pod - caaPod.Namespace = "confidential-containers-system" - expectedSuccessMessage := "Choosing " + tc.alternateImageName - - pods, err := GetPodNamesByLabel(ctx, client, t, caaPod.Namespace, "app", "cloud-api-adaptor") - if err != nil { - t.Fatal(err) - } - - caaPod.Name = pods.Items[0].Name - LogString, err := ComparePodLogString(ctx, client, caaPod, expectedSuccessMessage) + err := VerifyAlternateImage(ctx, t, client, tc.alternateImageName) if err != nil { - t.Logf("Output:%s", LogString) t.Fatal(err) } - t.Logf("PodVM was brought up using the alternate PodVM image %s", tc.alternateImageName) } return ctx }). 
From 6012a5dc5509240b1e04d041f0d9c69a00457d50 Mon Sep 17 00:00:00 2001
From: stevenhorsman
Date: Mon, 21 Oct 2024 10:29:32 +0100
Subject: [PATCH 05/11] test/e2e: Refactor out VerifyNydusSnapshotter

- Refactor out the logic that checks if nydus snapshotter was used
for image pull, to make the assessment_runner flow cleaner.
- Refactor IsPulledWithNydusSnapshotter to remove the duplicated
code and re-use other helper functions

Signed-off-by: stevenhorsman
---
 .../test/e2e/assessment_helpers.go | 55 +++++++++++++------
 .../test/e2e/assessment_runner.go | 19 +------
 2 files changed, 40 insertions(+), 34 deletions(-)

diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
index e43df124f..4b2e630f0 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
@@ -124,33 +124,31 @@
 // 11:47:42 [adaptor/proxy] CreateContainer: Ignoring PullImage before CreateContainer (cid: "")
 // was output
 func IsPulledWithNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, nodeName string, containerId string) (bool, error) {
-	var podlist v1.PodList
-
 	nydusSnapshotterPullRegex, err := regexp.Compile(`.*mount_point:/run/kata-containers.*` + containerId + `.*driver:image_guest_pull.*$`)
 	if err != nil {
 		return false, err
 	}

-	if err := client.Resources("confidential-containers-system").List(ctx, &podlist); err != nil {
+	var caaPod v1.Pod
+	caaPod.Namespace = "confidential-containers-system"
+	pods, err := GetPodNamesByLabel(ctx, client, t, caaPod.Namespace, "app", "cloud-api-adaptor")
+	if err != nil {
 		t.Fatal(err)
 	}
-	for _, pod := range podlist.Items {
-		if pod.Labels["app"] == "cloud-api-adaptor" && pod.Spec.NodeName == nodeName {
-			podLogString, err := GetPodLog(ctx, client, pod)
-			if err != nil {
-				return false, err
-			}
+	caaPod.Name = pods.Items[0].Name

-			podLogSlice := reverseSlice(strings.Split(podLogString, "\n"))
-			for _, line := range podLogSlice {
-				if nydusSnapshotterPullRegex.MatchString(line) {
-					return true, nil
-				}
-			}
-			return false, fmt.Errorf("Didn't find pull image for snapshotter")
+	podLogString, err := GetPodLog(ctx, client, caaPod)
+
+	if err != nil {
+		return false, fmt.Errorf("IsPulledWithNydusSnapshotter: failed to list pods: %v", err)
+	}
+	podLogSlice := reverseSlice(strings.Split(podLogString, "\n"))
+	for _, line := range podLogSlice {
+		if nydusSnapshotterPullRegex.MatchString(line) {
+			return true, nil
 		}
 	}
-	return false, fmt.Errorf("No cloud-api-adaptor pod found in podList: %v", podlist.Items)
+	return false, fmt.Errorf("Didn't find pull image for snapshotter")
 }

 func GetPodLog(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) {
@@ -282,6 +280,29 @@
 	return nil
 }

+func VerifyNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, pod v1.Pod) error {
+	nodeName, err := GetNodeNameFromPod(ctx, client, pod)
+	if err != nil {
+		return fmt.Errorf("GetNodeNameFromPod failed with %v", err)
+	}
+	log.Tracef("Test pod running on node %s", nodeName)
+
+	containerId := pod.Status.ContainerStatuses[0].ContainerID
+	containerId, found := strings.CutPrefix(containerId, "containerd://")
+	if !found {
+		return fmt.Errorf("VerifyNydusSnapshotter: unexpected container id format: %s", containerId)
+	}
+
+	usedNydusSnapshotter, err := 
IsPulledWithNydusSnapshotter(ctx, t, client, nodeName, containerId) + if err != nil { + return fmt.Errorf("IsPulledWithNydusSnapshotter: failed with %v", err) + } + if !usedNydusSnapshotter { + return fmt.Errorf("Expected to pull with nydus, but that didn't happen") + } + return nil +} + func GetNodeNameFromPod(ctx context.Context, client klient.Client, customPod v1.Pod) (string, error) { var getNodeName = func(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) { return pod.Spec.NodeName, nil diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go index 14ec43e07..0bfd318b1 100644 --- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go +++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go @@ -450,24 +450,9 @@ func (tc *TestCase) Run() { } if tc.isNydusSnapshotter { - nodeName, err := GetNodeNameFromPod(ctx, client, *tc.pod) + err := VerifyNydusSnapshotter(ctx, t, client, *tc.pod) if err != nil { - t.Fatal(err) - } - log.Tracef("Test pod running on node %s", nodeName) - - containerId := tc.pod.Status.ContainerStatuses[0].ContainerID - containerId, found := strings.CutPrefix(containerId, "containerd://") - if !found { - t.Fatal("unexpected container id format") - } - - usedNydusSnapshotter, err := IsPulledWithNydusSnapshotter(ctx, t, client, nodeName, containerId) - if err != nil { - t.Fatal(err) - } - if !usedNydusSnapshotter { - t.Fatal("Expected to pull with nydus, but that didn't happen") + t.Error(err) } } } From 54993faa901daf4d0b1bcb0a3fefda5f632bc030 Mon Sep 17 00:00:00 2001 From: stevenhorsman Date: Mon, 21 Oct 2024 10:57:12 +0100 Subject: [PATCH 06/11] test/e2e: Create more helper functions Create new methods for VerifyCaaPodLogContains and getCaaPod to avoid duplication and improve readability. 
Signed-off-by: stevenhorsman --- .../test/e2e/assessment_helpers.go | 50 ++++++++++++------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go index 4b2e630f0..32a1d04b4 100644 --- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go +++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go @@ -119,23 +119,38 @@ func WatchImagePullTime(ctx context.Context, client klient.Client, caaPod v1.Pod return pullingtime, nil } +func getCaaPod(ctx context.Context, client klient.Client, t *testing.T) (v1.Pod, error) { + var caaPod v1.Pod + caaPod.Namespace = "confidential-containers-system" + pods, err := GetPodNamesByLabel(ctx, client, t, "confidential-containers-system", "app", "cloud-api-adaptor") + if err != nil { + t.Fatal(err) + } + + if len(pods.Items) == 0 { + return nil, fmt.Errorf("getCaaPod: We didn't find the CAA pod") + } else if len(pods.Items) > 1 { + return nil, fmt.Errorf("getCaaPod: We found multiple CAA pods: %v", pods.Items) + } + + caaPod.Name = pods.Items[0].Name + return caaPod, nil +} + // Check cloud-api-adaptor daemonset pod logs to ensure that something like: // [adaptor/proxy] mount_point:/run/kata-containers//rootfs source: fstype:overlay driver:image_guest_pull // 11:47:42 [adaptor/proxy] CreateContainer: Ignoring PullImage before CreateContainer (cid: "") // was output -func IsPulledWithNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, nodeName string, containerId string) (bool, error) { +func IsPulledWithNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, containerId string) (bool, error) { nydusSnapshotterPullRegex, err := regexp.Compile(`.*mount_point:/run/kata-containers.*` + containerId + `.*driver:image_guest_pull.*$`) if err != nil { return false, err } - var caaPod v1.Pod - caaPod.Namespace = "confidential-containers-system" - pods, err := GetPodNamesByLabel(ctx, client, t, caaPod.Namespace, "app", "cloud-api-adaptor") + caaPod, err := getCaaPod(ctx, client, t) if err != nil { t.Fatal(err) } - caaPod.Name = pods.Items[0].Name podLogString, err := GetPodLog(ctx, client, caaPod) @@ -261,39 +276,38 @@ func CompareInstanceType(ctx context.Context, t *testing.T, client klient.Client } func VerifyAlternateImage(ctx context.Context, t *testing.T, client klient.Client, alternateImageName string) error { - var caaPod v1.Pod - caaPod.Namespace = "confidential-containers-system" expectedSuccessMessage := "Choosing " + alternateImageName + err := VerifyCaaPodLogContains(ctx, t, client, expectedSuccessMessage) + if err != nil { + return fmt.Errorf("VerifyAlternateImage: failed: %v", err) + } + t.Logf("PodVM was brought up using the alternate PodVM image %s", alternateImageName) + return nil +} - pods, err := GetPodNamesByLabel(ctx, client, t, caaPod.Namespace, "app", "cloud-api-adaptor") +func VerifyCaaPodLogContains(ctx context.Context, t *testing.T, client klient.Client, expected string) error { + caaPod, err := getCaaPod(ctx, client, t) if err != nil { t.Fatal(err) } - caaPod.Name = pods.Items[0].Name - LogString, err := ComparePodLogString(ctx, client, caaPod, expectedSuccessMessage) + LogString, err := ComparePodLogString(ctx, client, caaPod, expected) if err != nil { t.Logf("Output:%s", LogString) t.Fatal(err) } - t.Logf("PodVM was brought up using the alternate PodVM image %s", alternateImageName) + t.Logf("CAA pod log contained the expected string %s", expected) return nil } func 
VerifyNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, pod v1.Pod) error { - nodeName, err := GetNodeNameFromPod(ctx, client, pod) - if err != nil { - return fmt.Errorf("GetNodeNameFromPod failed with %v", err) - } - log.Tracef("Test pod running on node %s", nodeName) - containerId := pod.Status.ContainerStatuses[0].ContainerID containerId, found := strings.CutPrefix(containerId, "containerd://") if !found { return fmt.Errorf("VerifyNydusSnapshotter: unexpected container id format: %s", containerId) } - usedNydusSnapshotter, err := IsPulledWithNydusSnapshotter(ctx, t, client, nodeName, containerId) + usedNydusSnapshotter, err := IsPulledWithNydusSnapshotter(ctx, t, client, containerId) if err != nil { return fmt.Errorf("IsPulledWithNydusSnapshotter: failed with %v", err) } From 98855c2d744131b1581ca466ca23628521660016 Mon Sep 17 00:00:00 2001 From: stevenhorsman Date: Mon, 21 Oct 2024 11:01:00 +0100 Subject: [PATCH 07/11] test/e2e: Remove fail reason test case option No tests are using the `WithFailReason` option, so remove the code to reduce clutter Signed-off-by: stevenhorsman --- .../test/e2e/assessment_runner.go | 45 ------------------- 1 file changed, 45 deletions(-) diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go index 0bfd318b1..e569880c5 100644 --- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go +++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go @@ -12,7 +12,6 @@ import ( "testing" "time" - log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" @@ -70,7 +69,6 @@ type TestCase struct { deletionWithin time.Duration expectedInstanceType string isNydusSnapshotter bool - FailReason string alternateImageName string } @@ -169,11 +167,6 @@ func (tc *TestCase) WithNydusSnapshotter() *TestCase { return tc } -func (tc *TestCase) WithFailReason(reason string) *TestCase { - tc.FailReason = reason - return tc -} - func (tc *TestCase) Run() { testCaseFeature := features.New(fmt.Sprintf("%s test", tc.testName)). 
WithSetup("Create testworkload", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { @@ -411,44 +404,6 @@ func (tc *TestCase) Run() { tc.assert.HasPodVM(t, tc.pod.Name) } - if tc.podState != v1.PodRunning && tc.podState != v1.PodSucceeded { - profile, error := tc.assert.GetInstanceType(t, tc.pod.Name) - if error != nil { - if error.Error() != "Failed to Create PodVM Instance" { - t.Fatal(error) - } - } else if profile != "" { - t.Logf("PodVM Created with Instance Type %v", profile) - if tc.FailReason != "" { - var podlist v1.PodList - var podLogString string - if err := client.Resources("confidential-containers-system").List(ctx, &podlist); err != nil { - t.Fatal(err) - } - for _, pod := range podlist.Items { - if pod.Labels["app"] == "cloud-api-adaptor" { - podLogString, _ = GetPodLog(ctx, client, pod) - break - } - } - if strings.Contains(podLogString, tc.FailReason) { - t.Logf("failed due to expected reason %s", tc.FailReason) - } else { - t.Logf("cloud-api-adaptor pod logs: %s", podLogString) - yamlData, err := yaml.Marshal(tc.pod.Status) - if err != nil { - log.Errorf("Error marshaling pod.Status to JSON: %s", err.Error()) - } else { - t.Logf("Current Pod State: %v", string(yamlData)) - } - t.Fatal("failed due to unknown reason") - } - } else { - t.Logf("Pod Failed If you want to cross check please give .WithFailReason(error string)") - } - } - } - if tc.isNydusSnapshotter { err := VerifyNydusSnapshotter(ctx, t, client, *tc.pod) if err != nil { From 01254bc39566e1a7f3e07735a279d7a836fee30e Mon Sep 17 00:00:00 2001 From: stevenhorsman Date: Mon, 21 Oct 2024 14:22:25 +0100 Subject: [PATCH 08/11] test/e2e: Remove t.Fatal from Assess methods The [Go Wiki]|(https://go.dev/wiki/TestComments) states > On a practical level, prefer calling t.Error over t.Fatal. > t.Fatal is usually only appropriate when some piece > of test setup fails, without which you cannot run the > test at all. 
- Update t.Fatal calls in assess methods to t.Error calls
- Pass pointers to pods rather than the Pod struct, like the
Kubernetes e2e pod helpers do

Signed-off-by: stevenhorsman
---
 .../test/e2e/assessment_helpers.go | 55 +++++++++----------
 .../test/e2e/assessment_runner.go | 41 ++++++--------
 .../test/e2e/nginx_deployment.go | 2 +-
 3 files changed, 44 insertions(+), 54 deletions(-)

diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
index 32a1d04b4..7c8e583f5 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
@@ -73,7 +73,7 @@ func NewExtraPod(namespace string, podName string, containerName string, imageNa
 	return extPod
 }

-func WatchImagePullTime(ctx context.Context, client klient.Client, caaPod v1.Pod, pod v1.Pod) (string, error) {
+func WatchImagePullTime(ctx context.Context, client klient.Client, caaPod *v1.Pod, pod *v1.Pod) (string, error) {
 	pullingtime := ""
 	var startTime, endTime time.Time

@@ -119,12 +119,15 @@ func WatchImagePullTime(ctx context.Context, client klient.Client, caaPod v1.Pod
 	return pullingtime, nil
 }

-func getCaaPod(ctx context.Context, client klient.Client, t *testing.T) (v1.Pod, error) {
-	var caaPod v1.Pod
-	caaPod.Namespace = "confidential-containers-system"
-	pods, err := GetPodNamesByLabel(ctx, client, t, "confidential-containers-system", "app", "cloud-api-adaptor")
+func getCaaPod(ctx context.Context, client klient.Client, t *testing.T) (*v1.Pod, error) {
+	caaPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "confidential-containers-system",
+		},
+	}
+	pods, err := GetPodNamesByLabel(ctx, client, t, caaPod.GetObjectMeta().GetNamespace(), "app", "cloud-api-adaptor")
 	if err != nil {
-		t.Fatal(err)
+		return nil, fmt.Errorf("getCaaPod: GetPodNamesByLabel failed: %v", err)
 	}

 	if len(pods.Items) == 0 {
@@ -149,11 +152,10 @@ func IsPulledWithNydusSnapshotter(ctx context.Context, t *testing.T, client klie

 	caaPod, err := getCaaPod(ctx, client, t)
 	if err != nil {
-		t.Fatal(err)
+		return false, fmt.Errorf("IsPulledWithNydusSnapshotter: failed to get CAA pod: %v", err)
 	}

 	podLogString, err := GetPodLog(ctx, client, caaPod)
-
 	if err != nil {
 		return false, fmt.Errorf("IsPulledWithNydusSnapshotter: failed to list pods: %v", err)
 	}
@@ -166,7 +168,7 @@ func IsPulledWithNydusSnapshotter(ctx context.Context, t *testing.T, client klie
 	return false, fmt.Errorf("Didn't find pull image for snapshotter")
 }

-func GetPodLog(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) {
+func GetPodLog(ctx context.Context, client klient.Client, pod *v1.Pod) (string, error) {
 	clientset, err := kubernetes.NewForConfig(client.RESTConfig())
 	if err != nil {
 		return "", err
@@ -186,7 +188,7 @@ func GetPodLog(ctx context.Context, client klient.Client, pod v1.Pod) (string, e
 	return strings.TrimSpace(buf.String()), nil
 }

-func ComparePodLogString(ctx context.Context, client klient.Client, customPod v1.Pod, expectedPodLogString string) (string, error) {
+func ComparePodLogString(ctx context.Context, client klient.Client, customPod *v1.Pod, expectedPodLogString string) (string, error) {
 	//adding sleep time to initialize container and ready for logging
 	time.Sleep(5 * time.Second)

@@ -203,7 +205,7 @@ }

 // Note: there are currently two event types: Normal and Warning, so Warning includes errors
-func GetPodEventWarningDescriptions(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) {
+func GetPodEventWarningDescriptions(ctx context.Context, client klient.Client, pod *v1.Pod) (string, error) {
 	clientset, err := kubernetes.NewForConfig(client.RESTConfig())
 	if err != nil {
 		return "", err
@@ -226,7 +228,7 @@ func GetPodEventWarningDescriptions(ctx context.Context, client klient.Client, p
 // This function takes an expected pod event "warning" string (note warning also covers errors) and checks to see if it
 // shows up in the event log of the pod. Some pods error in failed state, so can be immediately checked, others fail
 // in waiting state (e.g. ImagePullBackoff errors), so we need to poll for errors showing up on these pods
-func ComparePodEventWarningDescriptions(ctx context.Context, t *testing.T, client klient.Client, pod v1.Pod, expectedPodEvent string) error {
+func ComparePodEventWarningDescriptions(ctx context.Context, t *testing.T, client klient.Client, pod *v1.Pod, expectedPodEvent string) error {
 	retries := 1
 	delay := 10 * time.Second
@@ -263,12 +265,12 @@
 		if podItem.ObjectMeta.Name == pod.Name {
 			instanceType, err := getInstanceTypeFn(t, pod.Name)
 			if err != nil {
-				t.Fatal(err)
+				return fmt.Errorf("CompareInstanceType: failed to get instance type: %v", err)
 			}
 			if instanceType == expectedInstanceType {
 				return nil
 			} else {
-				return fmt.Errorf("error: Pod instance type was %s, but we expected %s ", instanceType, expectedInstanceType)
+				return fmt.Errorf("CompareInstanceType: instance type was %s, but we expected %s ", instanceType, expectedInstanceType)
 			}
 		}
 	}
@@ -288,19 +290,18 @@ func VerifyAlternateImage(ctx context.Context, t *testing.T, client klient.Clien

 func VerifyCaaPodLogContains(ctx context.Context, t *testing.T, client klient.Client, expected string) error {
 	caaPod, err := getCaaPod(ctx, client, t)
 	if err != nil {
-		t.Fatal(err)
+		return fmt.Errorf("VerifyCaaPodLogContains: failed to getCaaPod: %v", err)
 	}

 	LogString, err := ComparePodLogString(ctx, client, caaPod, expected)
 	if err != nil {
-		t.Logf("Output:%s", LogString)
-		t.Fatal(err)
+		return fmt.Errorf("VerifyCaaPodLogContains: failed to ComparePodLogString: logString: %s error %v", LogString, err)
 	}
 	t.Logf("CAA pod log contained the expected string %s", expected)
 	return nil
 }

-func VerifyNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, pod v1.Pod) error {
+func VerifyNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, pod *v1.Pod) error {
 	containerId := pod.Status.ContainerStatuses[0].ContainerID
 	containerId, found := strings.CutPrefix(containerId, "containerd://")
 	if !found {
 		return fmt.Errorf("VerifyNydusSnapshotter: unexpected container id format: %s", containerId)
 	}

 	usedNydusSnapshotter, err := IsPulledWithNydusSnapshotter(ctx, t, client, containerId)
 	if err != nil {
 		return fmt.Errorf("IsPulledWithNydusSnapshotter: failed with %v", err)
 	}
@@ -318,8 +318,8 @@
-func GetNodeNameFromPod(ctx context.Context, client klient.Client, customPod v1.Pod) (string, error) {
-	var getNodeName = func(ctx context.Context, client klient.Client, pod v1.Pod) (string, error) {
+func GetNodeNameFromPod(ctx context.Context, client klient.Client, customPod *v1.Pod) (string, error) {
+	var getNodeName = func(ctx context.Context, client klient.Client, pod *v1.Pod) (string, error) {
 		return pod.Spec.NodeName, nil
 	}
 	return getStringFromPod(ctx, client, customPod, getNodeName)
@@ -505,7 +506,7 @@
 	}
 	if actualPod.Status.Phase == v1.PodRunning {
 		fmt.Printf("Log of the pod %.v \n===================\n", actualPod.Name)
-		podLogString, _ := GetPodLog(ctx, client, *actualPod)
+		
podLogString, _ := GetPodLog(ctx, client, actualPod)
 		fmt.Println(podLogString)
 		fmt.Printf("===================\n")
 	}
@@ -633,29 +634,27 @@ func GetPodNamesByLabel(ctx context.Context, client klient.Client, t *testing.T,

 	clientset, err := kubernetes.NewForConfig(client.RESTConfig())
 	if err != nil {
-		t.Fatal(err)
-		return nil, err
+		return nil, fmt.Errorf("GetPodNamesByLabel: get Kubernetes clientSet failed: %v", err)
 	}

 	pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelName + "=" + labelValue})
 	if err != nil {
-		t.Fatal(err)
-		return nil, err
+		return nil, fmt.Errorf("GetPodNamesByLabel: get pod list failed: %v", err)
 	}

 	return pods, nil
 }

-type podToString func(context.Context, klient.Client, v1.Pod) (string, error)
+type podToString func(context.Context, klient.Client, *v1.Pod) (string, error)

-func getStringFromPod(ctx context.Context, client klient.Client, pod v1.Pod, fn podToString) (string, error) {
+func getStringFromPod(ctx context.Context, client klient.Client, pod *v1.Pod, fn podToString) (string, error) {
 	var podlist v1.PodList
 	if err := client.Resources(pod.Namespace).List(ctx, &podlist); err != nil {
 		return "", err
 	}
 	for _, podItem := range podlist.Items {
 		if podItem.ObjectMeta.Name == pod.Name {
-			return fn(ctx, client, podItem)
+			return fn(ctx, client, &podItem)
 		}
 	}
 	return "", fmt.Errorf("no pod matching %v, was found", pod)
diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
index e569880c5..e1891fc14 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
@@ -273,7 +273,7 @@ func (tc *TestCase) Run() {
 			}
 			if pod.Status.Phase == v1.PodRunning {
 				t.Logf("Log of the pod %.v \n===================\n", pod.Name)
-				podLogString, _ := GetPodLog(ctx, client, *pod)
+				podLogString, _ := GetPodLog(ctx, client, pod)
 				t.Log(podLogString)
 				t.Logf("===================\n")
 			}
@@ -339,7 +339,7 @@
 				}
 				for _, caaPod := range podlist.Items {
 					if caaPod.Labels["app"] == "cloud-api-adaptor" {
-						imagePullTime, err := WatchImagePullTime(ctx, client, caaPod, *tc.pod)
+						imagePullTime, err := WatchImagePullTime(ctx, client, &caaPod, tc.pod)
 						if err != nil {
 							t.Fatal(err)
 						}
@@ -350,64 +350,57 @@
 			}

 			if tc.expectedPodLogString != "" {
-				LogString, err := ComparePodLogString(ctx, client, *tc.pod, tc.expectedPodLogString)
+				logString, err := ComparePodLogString(ctx, client, tc.pod, tc.expectedPodLogString)
 				if err != nil {
-					t.Logf("Output:%s", LogString)
-					t.Fatal(err)
+					t.Errorf("Looking for %s, in pod log: %s, failed with: %v", tc.expectedPodLogString, logString, err)
 				}
-				t.Logf("Log output of peer pod:%s", LogString)
 			}

 			if tc.expectedPodEventErrorString != "" {
-				err := ComparePodEventWarningDescriptions(ctx, t, client, *tc.pod, tc.expectedPodEventErrorString)
+				err := ComparePodEventWarningDescriptions(ctx, t, client, tc.pod, tc.expectedPodEventErrorString)
 				if err != nil {
-					t.Fatal(err)
+					t.Errorf("Looking for %s, in pod events log, failed with: %v", tc.expectedPodEventErrorString, err)
 				}
 			} else {
 				// There shouldn't have been any pod event warnings/errors
-				warnings, err := GetPodEventWarningDescriptions(ctx, client, *tc.pod)
+				warnings, err := GetPodEventWarningDescriptions(ctx, client, tc.pod)
 				if err != nil {
-					t.Fatal(err)
+					t.Errorf("We hit an error trying to get the event log of %s", tc.pod.Name)
 				}
 				if warnings != "" {
-					t.Fatal(fmt.Errorf("unexpected warning/error event(s): %s", warnings))
+					t.Errorf("unexpected warning/error event(s): %s", warnings)
 				}
 			}

 			if tc.expectedInstanceType != "" {
 				err := CompareInstanceType(ctx, t, client, *tc.pod, tc.expectedInstanceType, tc.assert.GetInstanceType)
 				if err != nil {
-					t.Fatal(err)
+					t.Errorf("CompareInstanceType failed: %v", err)
 				}
 			}

 			if tc.podState == v1.PodRunning {
-				if err := client.Resources(tc.pod.Namespace).List(ctx, &podlist); err != nil {
-					t.Fatal(err)
-				}
 				if len(tc.testCommands) > 0 {
 					if len(tc.testCommands) > 0 {
 						logString, err := AssessPodTestCommands(ctx, client, tc.pod, tc.testCommands)
-						t.Logf("Output when execute test commands: %s", logString)
 						if err != nil {
-							t.Fatal(err)
+							t.Errorf("AssessPodTestCommands failed, with output: %s and error: %v", logString, err)
 						}
 					}
 				}

 				err := AssessPodRequestAndLimit(ctx, client, tc.pod)
 				if err != nil {
-					t.Logf("request and limit for podvm extended resource are not set to 1")
-					t.Fatal(err)
+					t.Errorf("request and limit for podvm extended resource are not set to 1: %v", err)
 				}

 				tc.assert.HasPodVM(t, tc.pod.Name)
 			}

 			if tc.isNydusSnapshotter {
-				err := VerifyNydusSnapshotter(ctx, t, client, *tc.pod)
+				err := VerifyNydusSnapshotter(ctx, t, client, tc.pod)
 				if err != nil {
-					t.Error(err)
+					t.Errorf("VerifyNydusSnapshotter failed: %v", err)
 				}
 			}
 		}
@@ -419,7 +412,7 @@
 					t.Fatal("Please implement assess logic for imagePullTimer")
 				}
 				if extraPod.expectedPodLogString != "" {
-					LogString, err := ComparePodLogString(ctx, client, *extraPod.pod, extraPod.expectedPodLogString)
+					LogString, err := ComparePodLogString(ctx, client, extraPod.pod, extraPod.expectedPodLogString)
 					if err != nil {
 						t.Logf("Output:%s", LogString)
 						t.Fatal(err)
@@ -441,15 +434,13 @@
 					// TBD
 					t.Fatal("Error: isNydusSnapshotter hasn't been implemented in extraPods. Please implement assess function for isNydusSnapshotter.")
 				}
-
 			}
-
 		}

 		if tc.alternateImageName != "" {
 			err := VerifyAlternateImage(ctx, t, client, tc.alternateImageName)
 			if err != nil {
-				t.Fatal(err)
+				t.Errorf("VerifyAlternateImage failed: %v", err)
 			}
 		}
 		return ctx
diff --git a/src/cloud-api-adaptor/test/e2e/nginx_deployment.go b/src/cloud-api-adaptor/test/e2e/nginx_deployment.go
index 6cf685b4f..50545fac7 100644
--- a/src/cloud-api-adaptor/test/e2e/nginx_deployment.go
+++ b/src/cloud-api-adaptor/test/e2e/nginx_deployment.go
@@ -181,7 +181,7 @@ func waitForNginxDeploymentAvailable(ctx context.Context, t *testing.T, client k
 			if pod.Status.Phase == v1.PodRunning {
 				t.Logf("Log of the pod %.v", pod.Name)
 				t.Logf("===================")
-				podLogString, _ := GetPodLog(ctx, client, pod)
+				podLogString, _ := GetPodLog(ctx, client, &pod)
 				t.Logf(podLogString)
 				t.Logf("===================")
 			}

From 3790a9abb842309c6bb17b05ea80a2056b7ac914 Mon Sep 17 00:00:00 2001
From: stevenhorsman
Date: Mon, 21 Oct 2024 14:26:16 +0100
Subject: [PATCH 09/11] test/e2e: Refactor VerifyImagePullTimer

Refactor out the logic that checks the image pull timing, to make
the assessment_runner flow cleaner.
Signed-off-by: stevenhorsman
---
 .../test/e2e/assessment_helpers.go | 14 ++++++++++++++
 .../test/e2e/assessment_runner.go  | 15 +++------------
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
index 7c8e583f5..b65b8d927 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
@@ -318,6 +318,20 @@ func VerifyNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Cli
 	return nil
 }
 
+func VerifyImagePullTimer(ctx context.Context, t *testing.T, client klient.Client, pod *v1.Pod) error {
+	caaPod, err := getCaaPod(ctx, client, t)
+	if err != nil {
+		return fmt.Errorf("VerifyImagePullTimer: failed to getCaaPod: %v", err)
+	}
+
+	imagePullTime, err := WatchImagePullTime(ctx, client, caaPod, pod)
+	if err != nil {
+		return fmt.Errorf("VerifyImagePullTimer: WatchImagePullTime failed with %v", err)
+	}
+	t.Logf("Time Taken to pull 4GB Image: %s", imagePullTime)
+	return nil
+}
+
 func GetNodeNameFromPod(ctx context.Context, client klient.Client, customPod *v1.Pod) (string, error) {
 	var getNodeName = func(ctx context.Context, client klient.Client, pod *v1.Pod) (string, error) {
 		return pod.Spec.NodeName, nil
diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
index e1891fc14..dbf9b3e0f 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
@@ -334,18 +334,9 @@ func (tc *TestCase) Run() {
 
 		if tc.pod != nil {
 			if tc.imagePullTimer {
-				if err := client.Resources("confidential-containers-system").List(ctx, &podlist); err != nil {
-					t.Fatal(err)
-				}
-				for _, caaPod := range podlist.Items {
-					if caaPod.Labels["app"] == "cloud-api-adaptor" {
-						imagePullTime, err := WatchImagePullTime(ctx, client, &caaPod, tc.pod)
-						if err != nil {
-							t.Fatal(err)
-						}
-						t.Logf("Time Taken to pull 4GB Image: %s", imagePullTime)
-						break
-					}
+				err := VerifyImagePullTimer(ctx, t, client, tc.pod)
+				if err != nil {
+					t.Error(err)
 				}
 			}
 

From e1767d265c9c1a5cc1ed80d6afe42792dc16720f Mon Sep 17 00:00:00 2001
From: stevenhorsman
Date: Tue, 22 Oct 2024 17:28:51 +0100
Subject: [PATCH 10/11] versions: Remove rust version

After #2121 the rust version is no longer needed.

Signed-off-by: stevenhorsman
---
 src/cloud-api-adaptor/versions.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/cloud-api-adaptor/versions.yaml b/src/cloud-api-adaptor/versions.yaml
index 080462b93..9b349a23d 100644
--- a/src/cloud-api-adaptor/versions.yaml
+++ b/src/cloud-api-adaptor/versions.yaml
@@ -21,7 +21,6 @@ cloudimg:
 tools:
   bats: 1.10.0
   golang: 1.22.7
-  rust: 1.75.0
   protoc: 3.15.0
   packer: v1.9.4
   kcli: 99.0.202407031308

From 69e05f098da51b8e72172069b4a2e0cbbd0b66c2 Mon Sep 17 00:00:00 2001
From: stevenhorsman
Date: Tue, 29 Oct 2024 15:27:24 +0000
Subject: [PATCH 11/11] tests: e2e: Add nodeName filter to getCaaPod

As Wainer pointed out in the review, if we ran the e2e tests on a
cluster with multiple worker nodes, then our logic of only checking for
a single CAA pod might fail, so add an additional nodeName filter, so
that we get the CAA pod that is running on the same node as the pod.
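To illustrate the filtering this relies on, here is a minimal
standalone sketch (not part of the patch) that lists pods via client-go
using the app label combined with a spec.nodeName field selector; the
node name is a placeholder and error handling is reduced to panics:

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Build a clientset from the default kubeconfig location.
    	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	clientset, err := kubernetes.NewForConfig(config)
    	if err != nil {
    		panic(err)
    	}

    	// CAA runs as a daemonset (one pod per worker node), so combining
    	// the app label with a spec.nodeName field selector should return
    	// at most one pod.
    	nodeName := "worker-0" // placeholder
    	pods, err := clientset.CoreV1().Pods("confidential-containers-system").List(context.TODO(), metav1.ListOptions{
    		LabelSelector: "app=cloud-api-adaptor",
    		FieldSelector: "spec.nodeName=" + nodeName,
    	})
    	if err != nil {
    		panic(err)
    	}
    	for _, pod := range pods.Items {
    		fmt.Println(pod.Name)
    	}
    }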
Signed-off-by: stevenhorsman
---
 .../test/e2e/assessment_helpers.go | 41 +++++++++++++------
 .../test/e2e/assessment_runner.go  | 14 +++----
 2 files changed, 36 insertions(+), 19 deletions(-)

diff --git a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
index b65b8d927..7de1951d8 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_helpers.go
@@ -119,13 +119,13 @@ func WatchImagePullTime(ctx context.Context, client klient.Client, caaPod *v1.Po
 	return pullingtime, nil
 }
 
-func getCaaPod(ctx context.Context, client klient.Client, t *testing.T) (*v1.Pod, error) {
+func getCaaPod(ctx context.Context, client klient.Client, t *testing.T, nodeName string) (*v1.Pod, error) {
 	caaPod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "confidential-containers-system",
 		},
 	}
 
-	pods, err := GetPodNamesByLabel(ctx, client, t, caaPod.GetObjectMeta().GetNamespace(), "app", "cloud-api-adaptor")
+	pods, err := GetPodNamesByLabel(ctx, client, t, caaPod.GetObjectMeta().GetNamespace(), "app", "cloud-api-adaptor", nodeName)
 	if err != nil {
 		return nil, fmt.Errorf("getCaaPod: GetPodNamesByLabel failed: %v", err)
 	}
@@ -144,13 +144,13 @@
 // [adaptor/proxy] mount_point:/run/kata-containers//rootfs source: fstype:overlay driver:image_guest_pull
 // 11:47:42 [adaptor/proxy] CreateContainer: Ignoring PullImage before CreateContainer (cid: "")
 // was output
-func IsPulledWithNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, containerId string) (bool, error) {
+func IsPulledWithNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, nodeName string, containerId string) (bool, error) {
 	nydusSnapshotterPullRegex, err := regexp.Compile(`.*mount_point:/run/kata-containers.*` + containerId + `.*driver:image_guest_pull.*$`)
 	if err != nil {
 		return false, err
 	}
 
-	caaPod, err := getCaaPod(ctx, client, t)
+	caaPod, err := getCaaPod(ctx, client, t, nodeName)
 	if err != nil {
 		return false, fmt.Errorf("IsPulledWithNydusSnapshotter: failed to get CAA pod: %v", err)
 	}
@@ -277,9 +277,14 @@ func CompareInstanceType(ctx context.Context, t *testing.T, client klient.Client
 	return fmt.Errorf("no pod matching %v, was found", pod)
 }
 
-func VerifyAlternateImage(ctx context.Context, t *testing.T, client klient.Client, alternateImageName string) error {
+func VerifyAlternateImage(ctx context.Context, t *testing.T, client klient.Client, pod *v1.Pod, alternateImageName string) error {
+	nodeName, err := GetNodeNameFromPod(ctx, client, pod)
+	if err != nil {
+		return fmt.Errorf("VerifyAlternateImage: GetNodeNameFromPod failed with %v", err)
+	}
+
 	expectedSuccessMessage := "Choosing " + alternateImageName
-	err := VerifyCaaPodLogContains(ctx, t, client, expectedSuccessMessage)
+	err = VerifyCaaPodLogContains(ctx, t, client, nodeName, expectedSuccessMessage)
 	if err != nil {
 		return fmt.Errorf("VerifyAlternateImage: failed: %v", err)
 	}
 
 	return nil
 }
 
-func VerifyCaaPodLogContains(ctx context.Context, t *testing.T, client klient.Client, expected string) error {
-	caaPod, err := getCaaPod(ctx, client, t)
+func VerifyCaaPodLogContains(ctx context.Context, t *testing.T, client klient.Client, nodeName, expected string) error {
+	caaPod, err := getCaaPod(ctx, client, t, nodeName)
 	if err != nil {
 		return fmt.Errorf("VerifyCaaPodLogContains: failed to getCaaPod: %v", err)
 	}
@@ -302,13 +307,19 @@ func VerifyCaaPodLogContains(ctx context.Context, t *testing.T, client klient.Cl
 }
 
 func VerifyNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Client, pod *v1.Pod) error {
+	nodeName, err := GetNodeNameFromPod(ctx, client, pod)
+	if err != nil {
+		return fmt.Errorf("VerifyNydusSnapshotter: GetNodeNameFromPod failed with %v", err)
+	}
+	log.Tracef("Test pod running on node %s", nodeName)
+
 	containerId := pod.Status.ContainerStatuses[0].ContainerID
 	containerId, found := strings.CutPrefix(containerId, "containerd://")
 	if !found {
 		return fmt.Errorf("VerifyNydusSnapshotter: unexpected container id format: %s", containerId)
 	}
 
-	usedNydusSnapshotter, err := IsPulledWithNydusSnapshotter(ctx, t, client, containerId)
+	usedNydusSnapshotter, err := IsPulledWithNydusSnapshotter(ctx, t, client, nodeName, containerId)
 	if err != nil {
 		return fmt.Errorf("IsPulledWithNydusSnapshotter: failed with %v", err)
 	}
@@ -319,7 +330,12 @@ func VerifyNydusSnapshotter(ctx context.Context, t *testing.T, client klient.Cli
 }
 
 func VerifyImagePullTimer(ctx context.Context, t *testing.T, client klient.Client, pod *v1.Pod) error {
-	caaPod, err := getCaaPod(ctx, client, t)
+	nodeName, err := GetNodeNameFromPod(ctx, client, pod)
+	if err != nil {
+		return fmt.Errorf("VerifyImagePullTimer: GetNodeNameFromPod failed with %v", err)
+	}
+
+	caaPod, err := getCaaPod(ctx, client, t, nodeName)
 	if err != nil {
 		return fmt.Errorf("VerifyImagePullTimer: failed to getCaaPod: %v", err)
 	}
@@ -644,14 +660,15 @@ func AddImagePullSecretToDefaultServiceAccount(ctx context.Context, client klien
 	return nil
 }
 
-func GetPodNamesByLabel(ctx context.Context, client klient.Client, t *testing.T, namespace string, labelName string, labelValue string) (*v1.PodList, error) {
+func GetPodNamesByLabel(ctx context.Context, client klient.Client, t *testing.T, namespace string, labelName string, labelValue string, nodeName string) (*v1.PodList, error) {
 	clientset, err := kubernetes.NewForConfig(client.RESTConfig())
 	if err != nil {
 		return nil, fmt.Errorf("GetPodNamesByLabel: get Kubernetes clientSet failed: %v", err)
 	}
 
-	pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelName + "=" + labelValue})
+	nodeSelector := fmt.Sprintf("spec.nodeName=%s", nodeName)
+	pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelName + "=" + labelValue, FieldSelector: nodeSelector})
 	if err != nil {
 		return nil, fmt.Errorf("GetPodNamesByLabel: get pod list failed: %v", err)
 	}
 
 	return pods, nil
 }
diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
index dbf9b3e0f..28a1d44da 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
@@ -394,6 +394,13 @@ func (tc *TestCase) Run() {
 					t.Errorf("VerifyNydusSnapshotter failed: %v", err)
 				}
 			}
+
+			if tc.alternateImageName != "" {
+				err := VerifyAlternateImage(ctx, t, client, tc.pod, tc.alternateImageName)
+				if err != nil {
+					t.Errorf("VerifyAlternateImage failed: %v", err)
+				}
+			}
 		}
 
 		if tc.extraPods != nil {
@@ -427,13 +434,6 @@ func (tc *TestCase) Run() {
 			}
 		}
 	}
-
-	if tc.alternateImageName != "" {
-		err := VerifyAlternateImage(ctx, t, client, tc.alternateImageName)
-		if err != nil {
-			t.Errorf("VerifyAlternateImage failed: %v", err)
-		}
-	}
 	return ctx
 }).
Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {