From 9f2479bdcbdf86c557745821af5e7aa82a0f0a38 Mon Sep 17 00:00:00 2001 From: Da Li Liu Date: Tue, 28 Nov 2023 10:44:45 +0800 Subject: [PATCH] e2e: use busybox to reduce test execution time - use busybox to reduce test execution time - add a new nginx deployment test - reduce WAIT_POD_RUNNING_TIMEOUT from 900 to 600 fixes for #1450 Signed-off-by: Da Li Liu --- test/e2e/assessment_runner_test.go | 65 ++++------ test/e2e/common.go | 34 ++--- test/e2e/common_suite_test.go | 45 ++++--- test/e2e/libvirt_test.go | 5 + test/e2e/nginx_deployment_test.go | 193 +++++++++++++++++++++++++++++ 5 files changed, 258 insertions(+), 84 deletions(-) create mode 100644 test/e2e/nginx_deployment_test.go diff --git a/test/e2e/assessment_runner_test.go b/test/e2e/assessment_runner_test.go index 656942a2ac..8a21a0acac 100644 --- a/test/e2e/assessment_runner_test.go +++ b/test/e2e/assessment_runner_test.go @@ -11,6 +11,7 @@ import ( "time" log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -21,7 +22,7 @@ import ( "sigs.k8s.io/e2e-framework/pkg/features" ) -const WAIT_POD_RUNNING_TIMEOUT = time.Second * 900 +const WAIT_POD_RUNNING_TIMEOUT = time.Second * 600 const WAIT_JOB_RUNNING_TIMEOUT = time.Second * 600 // testCommand is a list of commands to execute inside the pod container, @@ -189,45 +190,33 @@ func (tc *testCase) run() { if err = wait.For(conditions.New(client.Resources()).PodPhaseMatch(tc.pod, tc.podState), wait.WithTimeout(WAIT_POD_RUNNING_TIMEOUT)); err != nil { t.Fatal(err) } - if tc.podState == v1.PodRunning || tc.pod.Spec.Containers[0].ReadinessProbe != nil { - clientset, err := kubernetes.NewForConfig(client.RESTConfig()) - if err != nil { - t.Fatal(err) - } - pod, err := clientset.CoreV1().Pods(tc.pod.Namespace).Get(ctx, tc.pod.Name, metav1.GetOptions{}) - if err != nil { + if tc.podState == v1.PodRunning || len(tc.testCommands) > 0 { + t.Logf("Waiting for containers in 
pod: %v are ready", tc.pod.Name) + if err = wait.For(conditions.New(client.Resources()).ContainersReady(tc.pod), wait.WithTimeout(WAIT_POD_RUNNING_TIMEOUT)); err != nil { + //Added logs for debugging nightly tests + clientset, err := kubernetes.NewForConfig(client.RESTConfig()) + if err != nil { + t.Fatal(err) + } + pod, err := clientset.CoreV1().Pods(tc.pod.Namespace).Get(ctx, tc.pod.Name, metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + t.Logf("Expected Pod State: %v", tc.podState) + yamlData, err := yaml.Marshal(pod.Status) + if err != nil { + fmt.Println("Error marshaling pod.Status to YAML: ", err.Error()) + } else { + t.Logf("Current Pod State: %v", string(yamlData)) + } + if pod.Status.Phase == v1.PodRunning { + fmt.Printf("Log of the pod %.v \n===================\n", pod.Name) + podLogString, _ := getPodLog(ctx, client, *pod) + fmt.Println(podLogString) + fmt.Printf("===================\n") + } t.Fatal(err) } - //Added logs for debugging nightly tests - t.Logf("Expected Pod State: %v", tc.podState) - t.Logf("Current Pod State: %v", pod.Status.Phase) - //Getting Readiness probe of a container - for i, condition := range pod.Status.Conditions { - fmt.Printf("===================\n") - fmt.Printf("Checking Conditons - %v....\n", i+1) - fmt.Printf("===================\n") - fmt.Printf("*.Condition Type: %v\n", condition.Type) - fmt.Printf("*.Condition Status: %v\n", condition.Status) - fmt.Printf("*.Condition Last Probe Time: %v\n", condition.LastProbeTime) - fmt.Printf("*.Condition Last Transition Time: %v\n", condition.LastTransitionTime) - fmt.Printf("*.Condition Last Message: %v\n", condition.Message) - fmt.Printf("*.Condition Last Reason: %v\n", condition.Reason) - } - - readinessProbe := pod.Spec.Containers[0].ReadinessProbe - if readinessProbe != nil { - fmt.Printf("===================\n") - fmt.Printf("Checking Readiness Probe....\n") - fmt.Printf("===================\n") - fmt.Printf("*.Initial Delay Seconds: %v\n", 
readinessProbe.InitialDelaySeconds) - fmt.Printf("*.Timeout Seconds: %v\n", readinessProbe.TimeoutSeconds) - fmt.Printf("*.Success Threshold: %v\n", readinessProbe.SuccessThreshold) - fmt.Printf("*.Failure Threshold: %v\n", readinessProbe.FailureThreshold) - fmt.Printf("*.Period Seconds: %v\n", readinessProbe.PeriodSeconds) - fmt.Printf("*.Probe Handler: %v\n", readinessProbe.ProbeHandler) - fmt.Printf("*.Probe Handler Port: %v\n", readinessProbe.ProbeHandler.HTTPGet.Port) - fmt.Printf("===================\n") - } } } diff --git a/test/e2e/common.go b/test/e2e/common.go index d9698f5d01..f0beb1983a 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -8,9 +8,10 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) +const BUSYBOX_IMAGE = "quay.io/prometheus/busybox:latest" + type podOption func(*corev1.Pod) func withRestartPolicy(restartPolicy corev1.RestartPolicy) podOption { @@ -19,23 +20,6 @@ func withRestartPolicy(restartPolicy corev1.RestartPolicy) podOption { } } -// Optional method to add ContainerPort and ReadinessProbe to listen Port 80 -func withContainerPort(port int32) podOption { - return func(p *corev1.Pod) { - p.Spec.Containers[0].Ports = []corev1.ContainerPort{{ContainerPort: port}} - p.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/", - Port: intstr.FromInt(int(port)), - }, - }, - InitialDelaySeconds: 10, - PeriodSeconds: 5, - } - } -} - func withCommand(command []string) podOption { return func(p *corev1.Pod) { p.Spec.Containers[0].Command = command @@ -108,7 +92,7 @@ func newPod(namespace string, podName string, containerName string, imageName st } func newNginxPod(namespace string) *corev1.Pod { - return newPod(namespace, "nginx", "nginx", "nginx", withRestartPolicy(corev1.RestartPolicyNever)) + return newNginxPodWithName(namespace, "nginx") } 
func newNginxPodWithName(namespace string, podName string) *corev1.Pod { @@ -116,11 +100,15 @@ func newNginxPodWithName(namespace string, podName string) *corev1.Pod { } func newBusyboxPod(namespace string) *corev1.Pod { - return newPod(namespace, "busybox-pod", "busybox", "quay.io/prometheus/busybox:latest", withCommand([]string{"/bin/sh", "-c", "sleep 3600"})) + return newBusyboxPodWithName(namespace, "busybox") +} + +func newBusyboxPodWithName(namespace, podName string) *corev1.Pod { + return newPod(namespace, podName, "busybox", BUSYBOX_IMAGE, withCommand([]string{"/bin/sh", "-c", "sleep 3600"})) } // newConfigMap returns a new config map object. -func newConfigMap(namespace string, name string, configMapData map[string]string) *corev1.ConfigMap { +func newConfigMap(namespace, name string, configMapData map[string]string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, Data: configMapData, @@ -128,7 +116,7 @@ func newConfigMap(namespace string, name string, configMapData map[string]string } // newSecret returns a new secret object. 
-func newSecret(namespace string, name string, data map[string][]byte, secretType corev1.SecretType) *corev1.Secret { +func newSecret(namespace, name string, data map[string][]byte, secretType corev1.SecretType) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, Data: data, @@ -137,7 +125,7 @@ func newSecret(namespace string, name string, data map[string][]byte, secretType } // newJob returns a new job -func newJob(namespace string, name string) *batchv1.Job { +func newJob(namespace, name string) *batchv1.Job { runtimeClassName := "kata-remote" // Comment out adding runtime-handler until nydus-snapshotter is stable // annotationData := map[string]string{ diff --git a/test/e2e/common_suite_test.go b/test/e2e/common_suite_test.go index eeff07923e..1c7a49e1c0 100644 --- a/test/e2e/common_suite_test.go +++ b/test/e2e/common_suite_test.go @@ -14,7 +14,6 @@ import ( "time" log "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" envconf "sigs.k8s.io/e2e-framework/pkg/envconf" ) @@ -22,7 +21,7 @@ import ( // doTestCreateSimplePod tests a simple peer-pod can be created. 
func doTestCreateSimplePod(t *testing.T, assert CloudAssert) { namespace := envconf.RandomName("default", 7) - pod := newNginxPod(namespace) + pod := newNginxPodWithName(namespace, "simplePeerPod") newTestCase(t, "SimplePeerPod", assert, "PodVM is created").withPod(pod).run() } @@ -31,29 +30,29 @@ func doTestCreateSimplePodWithNydusAnnotation(t *testing.T, assert CloudAssert) annotationData := map[string]string{ "io.containerd.cri.runtime-handler": "kata-remote", } - pod := newPod(namespace, "alpine", "alpine", "alpine", withRestartPolicy(corev1.RestartPolicyNever), withAnnotations(annotationData)) + pod := newPod(namespace, "alpine", "alpine", "alpine", withRestartPolicy(v1.RestartPolicyNever), withAnnotations(annotationData)) newTestCase(t, "SimplePeerPod", assert, "PodVM is created").withPod(pod).withNydusSnapshotter().run() } func doTestDeleteSimplePod(t *testing.T, assert CloudAssert) { namespace := envconf.RandomName("default", 7) - pod := newNginxPodWithName(namespace, "deletion-test") + pod := newBusyboxPodWithName(namespace, "deletion-test") duration := 1 * time.Minute newTestCase(t, "DeletePod", assert, "Deletion complete").withPod(pod).withDeleteAssertion(&duration).run() } func doTestCreatePodWithConfigMap(t *testing.T, assert CloudAssert) { namespace := envconf.RandomName("default", 7) - podName := "nginx-configmap-pod" - containerName := "nginx-configmap-container" - imageName := "nginx:latest" - configMapName := "nginx-configmap" + podName := "busybox-configmap-pod" + containerName := "busybox-configmap-container" + imageName := BUSYBOX_IMAGE + configMapName := "busybox-configmap" configMapFileName := "example.txt" podKubeConfigmapDir := "/etc/config/" configMapPath := podKubeConfigmapDir + configMapFileName configMapContents := "Hello, world" configMapData := map[string]string{configMapFileName: configMapContents} - pod := newPod(namespace, podName, containerName, imageName, withConfigMapBinding(podKubeConfigmapDir, configMapName), 
withContainerPort(80)) + pod := newPod(namespace, podName, containerName, imageName, withConfigMapBinding(podKubeConfigmapDir, configMapName), withCommand([]string{"/bin/sh", "-c", "sleep 3600"})) configMap := newConfigMap(namespace, configMapName, configMapData) testCommands := []testCommand{ { @@ -77,10 +76,10 @@ func doTestCreatePodWithConfigMap(t *testing.T, assert CloudAssert) { func doTestCreatePodWithSecret(t *testing.T, assert CloudAssert) { //doTestCreatePod(t, assert, "Secret is created and contains data", pod) namespace := envconf.RandomName("default", 7) - podName := "nginx-secret-pod" - containerName := "nginx-secret-container" - imageName := "nginx:latest" - secretName := "nginx-secret" + podName := "busybox-secret-pod" + containerName := "busybox-secret-container" + imageName := BUSYBOX_IMAGE + secretName := "busybox-secret" podKubeSecretsDir := "/etc/secret/" usernameFileName := "username" username := "admin" @@ -89,7 +88,7 @@ func doTestCreatePodWithSecret(t *testing.T, assert CloudAssert) { password := "password" passwordPath := podKubeSecretsDir + passwordFileName secretData := map[string][]byte{passwordFileName: []byte(password), usernameFileName: []byte(username)} - pod := newPod(namespace, podName, containerName, imageName, withSecretBinding(podKubeSecretsDir, secretName), withContainerPort(80)) + pod := newPod(namespace, podName, containerName, imageName, withSecretBinding(podKubeSecretsDir, secretName), withCommand([]string{"/bin/sh", "-c", "sleep 3600"})) secret := newSecret(namespace, secretName, secretData, v1.SecretTypeOpaque) testCommands := []testCommand{ @@ -168,7 +167,7 @@ func doTestCreatePeerPodAndCheckUserLogs(t *testing.T, assert CloudAssert) { // doTestCreateConfidentialPod verify a confidential peer-pod can be created. 
func doTestCreateConfidentialPod(t *testing.T, assert CloudAssert, testCommands []testCommand) { namespace := envconf.RandomName("default", 7) - pod := newNginxPodWithName(namespace, "confidential-pod-nginx") + pod := newBusyboxPodWithName(namespace, "confidential-pod-busybox") for i := 0; i < len(testCommands); i++ { testCommands[i].containerName = pod.Spec.Containers[0].Name } @@ -197,8 +196,8 @@ func doTestCreatePeerPodAndCheckEnvVariableLogsWithImageOnly(t *testing.T, asser func doTestCreatePeerPodAndCheckEnvVariableLogsWithDeploymentOnly(t *testing.T, assert CloudAssert) { namespace := envconf.RandomName("default", 7) podName := "env-variable-in-config" - imageName := "nginx:latest" - pod := newPod(namespace, podName, podName, imageName, withRestartPolicy(v1.RestartPolicyOnFailure), withEnvironmentalVariables([]v1.EnvVar{{Name: "ISPRODUCTION", Value: "true"}}), withCommand([]string{"/bin/sh", "-c", "env"}), withContainerPort(80)) + imageName := BUSYBOX_IMAGE + pod := newPod(namespace, podName, podName, imageName, withRestartPolicy(v1.RestartPolicyOnFailure), withEnvironmentalVariables([]v1.EnvVar{{Name: "ISPRODUCTION", Value: "true"}}), withCommand([]string{"/bin/sh", "-c", "env"})) expectedPodLogString := "ISPRODUCTION=true" newTestCase(t, "EnvVariablePeerPodWithDeploymentOnly", assert, "Peer pod with environmental variables has been created").withPod(pod).withExpectedPodLogString(expectedPodLogString).withCustomPodState(v1.PodSucceeded).run() } @@ -298,7 +297,7 @@ func doTestPodVMwithNoAnnotations(t *testing.T, assert CloudAssert, expectedType namespace := envconf.RandomName("default", 7) podName := "no-annotations" containerName := "busybox" - imageName := "busybox:latest" + imageName := BUSYBOX_IMAGE pod := newPod(namespace, podName, containerName, imageName, withCommand([]string{"/bin/sh", "-c", "sleep 3600"})) testInstanceTypes := instanceValidatorFunctions{ testSuccessfn: func(instance string) bool { @@ -319,7 +318,7 @@ func 
doTestPodVMwithAnnotationsInstanceType(t *testing.T, assert CloudAssert, ex namespace := envconf.RandomName("default", 7) podName := "annotations-instance-type" containerName := "busybox" - imageName := "busybox:latest" + imageName := BUSYBOX_IMAGE annotationData := map[string]string{ "io.katacontainers.config.hypervisor.machine_type": expectedType, } @@ -344,7 +343,7 @@ func doTestPodVMwithAnnotationsCPUMemory(t *testing.T, assert CloudAssert, expec namespace := envconf.RandomName("default", 7) podName := "annotations-cpu-mem" containerName := "busybox" - imageName := "busybox:latest" + imageName := BUSYBOX_IMAGE annotationData := map[string]string{ "io.katacontainers.config.hypervisor.default_vcpus": "2", "io.katacontainers.config.hypervisor.default_memory": "12288", @@ -370,7 +369,7 @@ func doTestPodVMwithAnnotationsInvalidInstanceType(t *testing.T, assert CloudAss namespace := envconf.RandomName("default", 7) podName := "annotations-invalid-instance-type" containerName := "busybox" - imageName := "busybox:latest" + imageName := BUSYBOX_IMAGE expectedErrorMessage := `requested instance type ("` + expectedType + `") is not part of supported instance types list` annotationData := map[string]string{ "io.katacontainers.config.hypervisor.machine_type": expectedType, @@ -395,7 +394,7 @@ func doTestPodVMwithAnnotationsLargerMemory(t *testing.T, assert CloudAssert) { namespace := envconf.RandomName("default", 7) podName := "annotations-too-big-mem" containerName := "busybox" - imageName := "busybox:latest" + imageName := BUSYBOX_IMAGE expectedErrorMessage := "failed to get instance type based on vCPU and memory annotations: no instance type found for the given vcpus (2) and memory (18432)" annotationData := map[string]string{ "io.katacontainers.config.hypervisor.default_vcpus": "2", @@ -421,7 +420,7 @@ func doTestPodVMwithAnnotationsLargerCPU(t *testing.T, assert CloudAssert) { namespace := envconf.RandomName("default", 7) podName := "annotations-too-big-cpu" 
containerName := "busybox" - imageName := "busybox:latest" + imageName := BUSYBOX_IMAGE expectedErrorMessage := []string{ "no instance type found for the given vcpus (3) and memory (12288)", "Number of cpus 3 specified in annotation default_vcpus is greater than the number of CPUs 2 on the system", diff --git a/test/e2e/libvirt_test.go b/test/e2e/libvirt_test.go index 2dbca08055..278f41f050 100644 --- a/test/e2e/libvirt_test.go +++ b/test/e2e/libvirt_test.go @@ -67,6 +67,11 @@ func TestLibvirtCreatePeerPodAndCheckEnvVariableLogsWithImageAndDeployment(t *te doTestCreatePeerPodAndCheckEnvVariableLogsWithImageAndDeployment(t, assert) } +func TestLibvirtCreateNginxDeployment(t *testing.T) { + assert := LibvirtAssert{} + doTestNginxDeployement(t, assert) +} + /* Failing due to issues will pulling image (ErrImagePull) func TestLibvirtCreatePeerPodWithLargeImage(t *testing.T) { diff --git a/test/e2e/nginx_deployment_test.go b/test/e2e/nginx_deployment_test.go new file mode 100644 index 0000000000..908f8f1397 --- /dev/null +++ b/test/e2e/nginx_deployment_test.go @@ -0,0 +1,193 @@ +// (C) Copyright Confidential Containers Contributors +// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "context" + "fmt" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" + envconf "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +const WAIT_NGINX_DEPLOYMENT_TIMEOUT = time.Second * 300 + +type deploymentOption func(*appsv1.Deployment) + +func withReplicaCount(replicas int32) deploymentOption { + return func(deployment *appsv1.Deployment) { + deployment.Spec.Replicas = &replicas + } +} + +func 
newDeployment(namespace, deploymentName, containerName, imageName string, options ...deploymentOption) *appsv1.Deployment { + runtimeClassName := "kata-remote" + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentName, + Namespace: namespace, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": containerName}, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": containerName}, + }, + Spec: v1.PodSpec{ + RuntimeClassName: &runtimeClassName, + Containers: []v1.Container{ + { + Name: containerName, + Image: imageName, + ReadinessProbe: &v1.Probe{ + ProbeHandler: v1.ProbeHandler{ + HTTPGet: &v1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(80), + }, + }, + InitialDelaySeconds: 5, + PeriodSeconds: 5, + TimeoutSeconds: 1, + FailureThreshold: 3, + }, + LivenessProbe: &v1.Probe{ + ProbeHandler: v1.ProbeHandler{ + HTTPGet: &v1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(80), + }, + }, + InitialDelaySeconds: 3, + PeriodSeconds: 30, + TimeoutSeconds: 1, + FailureThreshold: 3, + }, + }, + }, + }, + }, + }, + } + + for _, option := range options { + option(deployment) + } + return deployment +} + +func doTestNginxDeployement(t *testing.T, assert CloudAssert) { + namespace := envconf.RandomName("default", 7) + deploymentName := "nginx-deployment" + containerName := "nginx" + imageName := "nginx:latest" + replicas := int32(5) + deployment := newDeployment(namespace, deploymentName, containerName, imageName, withReplicaCount(replicas)) + + nginxImageFeature := features.New("Nginx image deployment test"). 
+ WithSetup("Create nginx deployment", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + log.Info("Creating nginx deployment...") + if err = client.Resources().Create(ctx, deployment); err != nil { + t.Fatal(err) + } + waitForNginxDeploymentAvailable(ctx, t, client, deployment, replicas) + log.Info("nginx deployment is available now") + return ctx + }). + Assess("Access for nginx deployment test", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + var podlist v1.PodList + if err := client.Resources(deployment.ObjectMeta.Namespace).List(ctx, &podlist); err != nil { + t.Fatal(err) + } + for _, pod := range podlist.Items { + if pod.ObjectMeta.Labels["app"] == "nginx" { + assert.HasPodVM(t, pod.ObjectMeta.Name) + } + } + return ctx + }). + Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + + log.Info("Deleting webserver deployment...") + duration := 2 * time.Minute + if err = client.Resources().Delete(ctx, deployment); err != nil { + t.Fatal(err) + } + log.Infof("Deleting deployment %s...", deploymentName) + if err = wait.For(conditions.New( + client.Resources()).ResourceDeleted(deployment), + wait.WithInterval(5*time.Second), + wait.WithTimeout(duration)); err != nil { + t.Fatal(err) + } + log.Infof("Deployment %s has been successfully deleted within %.0fs", deploymentName, duration.Seconds()) + + return ctx + }).Feature() + + testEnv.Test(t, nginxImageFeature) +} + +func waitForNginxDeploymentAvailable(ctx context.Context, t *testing.T, client klient.Client, deployment *appsv1.Deployment, rc int32) { + if err := wait.For(conditions.New(client.Resources()).ResourceMatch(deployment, func(object k8s.Object) bool { + deployObj, ok := object.(*appsv1.Deployment) + if 
!ok {
+			log.Printf("Not a Deployment object: %v", object)
+			return false
+		}
+		log.Printf("Current deployment available replicas: %d", deployObj.Status.AvailableReplicas)
+		return deployObj.Status.AvailableReplicas == rc
+	}), wait.WithTimeout(WAIT_NGINX_DEPLOYMENT_TIMEOUT)); err != nil {
+		var podlist v1.PodList
+		if err := client.Resources(deployment.ObjectMeta.Namespace).List(ctx, &podlist); err != nil {
+			t.Fatal(err)
+		}
+		for _, pod := range podlist.Items {
+			if pod.ObjectMeta.Labels["app"] == "nginx" {
+				//Added logs for debugging nightly tests
+				fmt.Printf("===================\n")
+				t.Logf("Debug info for pod: %v", pod.ObjectMeta.Name)
+				yamlData, err := yaml.Marshal(pod.Status)
+				if err != nil {
+					fmt.Println("Error marshaling pod.Status to YAML: ", err.Error())
+				} else {
+					t.Logf("Current Pod State: %v", string(yamlData))
+				}
+				if pod.Status.Phase == v1.PodRunning {
+					fmt.Printf("Log of the pod %v \n===================\n", pod.Name)
+					podLogString, _ := getPodLog(ctx, client, pod)
+					fmt.Println(podLogString)
+					fmt.Printf("===================\n")
+				}
+			}
+		}
+		t.Fatal(err)
+	}
+}