Skip to content

Commit

Permalink
e2e: use busybox to reduce test execution time
Browse files Browse the repository at this point in the history
- change to check container readiness for pods that are expected to be running or that execute test commands.
- use busybox to reduce test execution time
- add a new nginx deployment test
- reduce WAIT_POD_RUNNING_TIMEOUT from 900 to 600

fixes for confidential-containers#1450

Signed-off-by: Da Li Liu <liudali@cn.ibm.com>
  • Loading branch information
Da Li Liu committed Dec 22, 2023
1 parent 02662fe commit a293a49
Show file tree
Hide file tree
Showing 5 changed files with 257 additions and 83 deletions.
65 changes: 27 additions & 38 deletions test/e2e/assessment_runner_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"time"

log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand All @@ -21,7 +22,7 @@ import (
"sigs.k8s.io/e2e-framework/pkg/features"
)

const WAIT_POD_RUNNING_TIMEOUT = time.Second * 900
const WAIT_POD_RUNNING_TIMEOUT = time.Second * 600
const WAIT_JOB_RUNNING_TIMEOUT = time.Second * 600

// testCommand is a list of commands to execute inside the pod container,
Expand Down Expand Up @@ -189,45 +190,33 @@ func (tc *testCase) run() {
if err = wait.For(conditions.New(client.Resources()).PodPhaseMatch(tc.pod, tc.podState), wait.WithTimeout(WAIT_POD_RUNNING_TIMEOUT)); err != nil {
t.Fatal(err)
}
if tc.podState == v1.PodRunning || tc.pod.Spec.Containers[0].ReadinessProbe != nil {
clientset, err := kubernetes.NewForConfig(client.RESTConfig())
if err != nil {
t.Fatal(err)
}
pod, err := clientset.CoreV1().Pods(tc.pod.Namespace).Get(ctx, tc.pod.Name, metav1.GetOptions{})
if err != nil {
if tc.podState == v1.PodRunning || len(tc.testCommands) > 0 {
t.Logf("Waiting for containers in pod: %v are ready", tc.pod.Name)
if err = wait.For(conditions.New(client.Resources()).ContainersReady(tc.pod), wait.WithTimeout(WAIT_POD_RUNNING_TIMEOUT)); err != nil {
//Added logs for debugging nightly tests
clientset, err := kubernetes.NewForConfig(client.RESTConfig())
if err != nil {
t.Fatal(err)
}
pod, err := clientset.CoreV1().Pods(tc.pod.Namespace).Get(ctx, tc.pod.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
t.Logf("Expected Pod State: %v", tc.podState)
yamlData, err := yaml.Marshal(pod.Status)
if err != nil {
fmt.Println("Error marshaling pod.Status to YAML: ", err.Error())
} else {
t.Logf("Current Pod State: %v", string(yamlData))
}
if pod.Status.Phase == v1.PodRunning {
fmt.Printf("Log of the pod %.v \n===================\n", pod.Name)
podLogString, _ := getPodLog(ctx, client, *pod)
fmt.Println(podLogString)
fmt.Printf("===================\n")
}
t.Fatal(err)
}
//Added logs for debugging nightly tests
t.Logf("Expected Pod State: %v", tc.podState)
t.Logf("Current Pod State: %v", pod.Status.Phase)
//Getting Readiness probe of a container
for i, condition := range pod.Status.Conditions {
fmt.Printf("===================\n")
fmt.Printf("Checking Conditons - %v....\n", i+1)
fmt.Printf("===================\n")
fmt.Printf("*.Condition Type: %v\n", condition.Type)
fmt.Printf("*.Condition Status: %v\n", condition.Status)
fmt.Printf("*.Condition Last Probe Time: %v\n", condition.LastProbeTime)
fmt.Printf("*.Condition Last Transition Time: %v\n", condition.LastTransitionTime)
fmt.Printf("*.Condition Last Message: %v\n", condition.Message)
fmt.Printf("*.Condition Last Reason: %v\n", condition.Reason)
}

readinessProbe := pod.Spec.Containers[0].ReadinessProbe
if readinessProbe != nil {
fmt.Printf("===================\n")
fmt.Printf("Checking Readiness Probe....\n")
fmt.Printf("===================\n")
fmt.Printf("*.Initial Delay Seconds: %v\n", readinessProbe.InitialDelaySeconds)
fmt.Printf("*.Timeout Seconds: %v\n", readinessProbe.TimeoutSeconds)
fmt.Printf("*.Success Threshold: %v\n", readinessProbe.SuccessThreshold)
fmt.Printf("*.Failure Threshold: %v\n", readinessProbe.FailureThreshold)
fmt.Printf("*.Period Seconds: %v\n", readinessProbe.PeriodSeconds)
fmt.Printf("*.Probe Handler: %v\n", readinessProbe.ProbeHandler)
fmt.Printf("*.Probe Handler Port: %v\n", readinessProbe.ProbeHandler.HTTPGet.Port)
fmt.Printf("===================\n")
}
}
}

Expand Down
34 changes: 11 additions & 23 deletions test/e2e/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,10 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)

const BUSYBOX_IMAGE = "quay.io/prometheus/busybox:latest"

type podOption func(*corev1.Pod)

func withRestartPolicy(restartPolicy corev1.RestartPolicy) podOption {
Expand All @@ -19,23 +20,6 @@ func withRestartPolicy(restartPolicy corev1.RestartPolicy) podOption {
}
}

// withContainerPort is an optional pod option that exposes the given port on
// the first container and attaches an HTTP GET readiness probe against "/" on
// that same port (10s initial delay, probed every 5s).
func withContainerPort(port int32) podOption {
	return func(p *corev1.Pod) {
		container := &p.Spec.Containers[0]
		container.Ports = []corev1.ContainerPort{{ContainerPort: port}}
		probe := &corev1.Probe{
			ProbeHandler: corev1.ProbeHandler{
				HTTPGet: &corev1.HTTPGetAction{
					Path: "/",
					Port: intstr.FromInt(int(port)),
				},
			},
			InitialDelaySeconds: 10,
			PeriodSeconds:       5,
		}
		container.ReadinessProbe = probe
	}
}

func withCommand(command []string) podOption {
return func(p *corev1.Pod) {
p.Spec.Containers[0].Command = command
Expand Down Expand Up @@ -108,27 +92,31 @@ func newPod(namespace string, podName string, containerName string, imageName st
}

func newNginxPod(namespace string) *corev1.Pod {
return newPod(namespace, "nginx", "nginx", "nginx", withRestartPolicy(corev1.RestartPolicyNever))
return newNginxPodWithName(namespace, "nginx")
}

// newNginxPodWithName builds an nginx pod with the given name in the given
// namespace; the pod is never restarted once it terminates.
func newNginxPodWithName(namespace string, podName string) *corev1.Pod {
	pod := newPod(namespace, podName, "nginx", "nginx", withRestartPolicy(corev1.RestartPolicyNever))
	return pod
}

func newBusyboxPod(namespace string) *corev1.Pod {
return newPod(namespace, "busybox-pod", "busybox", "quay.io/prometheus/busybox:latest", withCommand([]string{"/bin/sh", "-c", "sleep 3600"}))
return newBusyboxPodWithName(namespace, "busybox")
}

// newBusyboxPodWithName builds a busybox pod with the given name in the given
// namespace that simply sleeps for an hour, keeping the container alive for
// exec-style test commands.
func newBusyboxPodWithName(namespace, podName string) *corev1.Pod {
	sleepCommand := []string{"/bin/sh", "-c", "sleep 3600"}
	return newPod(namespace, podName, "busybox", BUSYBOX_IMAGE, withCommand(sleepCommand))
}

// newConfigMap returns a new config map object.
func newConfigMap(namespace string, name string, configMapData map[string]string) *corev1.ConfigMap {
func newConfigMap(namespace, name string, configMapData map[string]string) *corev1.ConfigMap {
return &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
Data: configMapData,
}
}

// newSecret returns a new secret object.
func newSecret(namespace string, name string, data map[string][]byte, secretType corev1.SecretType) *corev1.Secret {
func newSecret(namespace, name string, data map[string][]byte, secretType corev1.SecretType) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
Data: data,
Expand All @@ -137,7 +125,7 @@ func newSecret(namespace string, name string, data map[string][]byte, secretType
}

// newJob returns a new job
func newJob(namespace string, name string) *batchv1.Job {
func newJob(namespace, name string) *batchv1.Job {
runtimeClassName := "kata-remote"
// Comment out adding runtime-handler until nydus-snapshotter is stable
// annotationData := map[string]string{
Expand Down
43 changes: 21 additions & 22 deletions test/e2e/common_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ import (
"time"

log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
envconf "sigs.k8s.io/e2e-framework/pkg/envconf"
)
Expand All @@ -31,29 +30,29 @@ func doTestCreateSimplePodWithNydusAnnotation(t *testing.T, assert CloudAssert)
annotationData := map[string]string{
"io.containerd.cri.runtime-handler": "kata-remote",
}
pod := newPod(namespace, "alpine", "alpine", "alpine", withRestartPolicy(corev1.RestartPolicyNever), withAnnotations(annotationData))
pod := newPod(namespace, "alpine", "alpine", "alpine", withRestartPolicy(v1.RestartPolicyNever), withAnnotations(annotationData))
newTestCase(t, "SimplePeerPod", assert, "PodVM is created").withPod(pod).withNydusSnapshotter().run()
}

func doTestDeleteSimplePod(t *testing.T, assert CloudAssert) {
namespace := envconf.RandomName("default", 7)
pod := newNginxPodWithName(namespace, "deletion-test")
pod := newBusyboxPodWithName(namespace, "deletion-test")
duration := 1 * time.Minute
newTestCase(t, "DeletePod", assert, "Deletion complete").withPod(pod).withDeleteAssertion(&duration).run()
}

func doTestCreatePodWithConfigMap(t *testing.T, assert CloudAssert) {
namespace := envconf.RandomName("default", 7)
podName := "nginx-configmap-pod"
containerName := "nginx-configmap-container"
imageName := "nginx:latest"
configMapName := "nginx-configmap"
podName := "busybox-configmap-pod"
containerName := "busybox-configmap-container"
imageName := BUSYBOX_IMAGE
configMapName := "busybox-configmap"
configMapFileName := "example.txt"
podKubeConfigmapDir := "/etc/config/"
configMapPath := podKubeConfigmapDir + configMapFileName
configMapContents := "Hello, world"
configMapData := map[string]string{configMapFileName: configMapContents}
pod := newPod(namespace, podName, containerName, imageName, withConfigMapBinding(podKubeConfigmapDir, configMapName), withContainerPort(80))
pod := newPod(namespace, podName, containerName, imageName, withConfigMapBinding(podKubeConfigmapDir, configMapName), withCommand([]string{"/bin/sh", "-c", "sleep 3600"}))
configMap := newConfigMap(namespace, configMapName, configMapData)
testCommands := []testCommand{
{
Expand All @@ -77,10 +76,10 @@ func doTestCreatePodWithConfigMap(t *testing.T, assert CloudAssert) {
func doTestCreatePodWithSecret(t *testing.T, assert CloudAssert) {
//doTestCreatePod(t, assert, "Secret is created and contains data", pod)
namespace := envconf.RandomName("default", 7)
podName := "nginx-secret-pod"
containerName := "nginx-secret-container"
imageName := "nginx:latest"
secretName := "nginx-secret"
podName := "busybox-secret-pod"
containerName := "busybox-secret-container"
imageName := BUSYBOX_IMAGE
secretName := "busybox-secret"
podKubeSecretsDir := "/etc/secret/"
usernameFileName := "username"
username := "admin"
Expand All @@ -89,7 +88,7 @@ func doTestCreatePodWithSecret(t *testing.T, assert CloudAssert) {
password := "password"
passwordPath := podKubeSecretsDir + passwordFileName
secretData := map[string][]byte{passwordFileName: []byte(password), usernameFileName: []byte(username)}
pod := newPod(namespace, podName, containerName, imageName, withSecretBinding(podKubeSecretsDir, secretName), withContainerPort(80))
pod := newPod(namespace, podName, containerName, imageName, withSecretBinding(podKubeSecretsDir, secretName), withCommand([]string{"/bin/sh", "-c", "sleep 3600"}))
secret := newSecret(namespace, secretName, secretData, v1.SecretTypeOpaque)

testCommands := []testCommand{
Expand Down Expand Up @@ -168,7 +167,7 @@ func doTestCreatePeerPodAndCheckUserLogs(t *testing.T, assert CloudAssert) {
// doTestCreateConfidentialPod verify a confidential peer-pod can be created.
func doTestCreateConfidentialPod(t *testing.T, assert CloudAssert, testCommands []testCommand) {
namespace := envconf.RandomName("default", 7)
pod := newNginxPodWithName(namespace, "confidential-pod-nginx")
pod := newBusyboxPodWithName(namespace, "confidential-pod-busybox")
for i := 0; i < len(testCommands); i++ {
testCommands[i].containerName = pod.Spec.Containers[0].Name
}
Expand Down Expand Up @@ -197,8 +196,8 @@ func doTestCreatePeerPodAndCheckEnvVariableLogsWithImageOnly(t *testing.T, asser
func doTestCreatePeerPodAndCheckEnvVariableLogsWithDeploymentOnly(t *testing.T, assert CloudAssert) {
namespace := envconf.RandomName("default", 7)
podName := "env-variable-in-config"
imageName := "nginx:latest"
pod := newPod(namespace, podName, podName, imageName, withRestartPolicy(v1.RestartPolicyOnFailure), withEnvironmentalVariables([]v1.EnvVar{{Name: "ISPRODUCTION", Value: "true"}}), withCommand([]string{"/bin/sh", "-c", "env"}), withContainerPort(80))
imageName := BUSYBOX_IMAGE
pod := newPod(namespace, podName, podName, imageName, withRestartPolicy(v1.RestartPolicyOnFailure), withEnvironmentalVariables([]v1.EnvVar{{Name: "ISPRODUCTION", Value: "true"}}), withCommand([]string{"/bin/sh", "-c", "env"}))
expectedPodLogString := "ISPRODUCTION=true"
newTestCase(t, "EnvVariablePeerPodWithDeploymentOnly", assert, "Peer pod with environmental variables has been created").withPod(pod).withExpectedPodLogString(expectedPodLogString).withCustomPodState(v1.PodSucceeded).run()
}
Expand Down Expand Up @@ -298,7 +297,7 @@ func doTestPodVMwithNoAnnotations(t *testing.T, assert CloudAssert, expectedType
namespace := envconf.RandomName("default", 7)
podName := "no-annotations"
containerName := "busybox"
imageName := "busybox:latest"
imageName := BUSYBOX_IMAGE
pod := newPod(namespace, podName, containerName, imageName, withCommand([]string{"/bin/sh", "-c", "sleep 3600"}))
testInstanceTypes := instanceValidatorFunctions{
testSuccessfn: func(instance string) bool {
Expand All @@ -319,7 +318,7 @@ func doTestPodVMwithAnnotationsInstanceType(t *testing.T, assert CloudAssert, ex
namespace := envconf.RandomName("default", 7)
podName := "annotations-instance-type"
containerName := "busybox"
imageName := "busybox:latest"
imageName := BUSYBOX_IMAGE
annotationData := map[string]string{
"io.katacontainers.config.hypervisor.machine_type": expectedType,
}
Expand All @@ -344,7 +343,7 @@ func doTestPodVMwithAnnotationsCPUMemory(t *testing.T, assert CloudAssert, expec
namespace := envconf.RandomName("default", 7)
podName := "annotations-cpu-mem"
containerName := "busybox"
imageName := "busybox:latest"
imageName := BUSYBOX_IMAGE
annotationData := map[string]string{
"io.katacontainers.config.hypervisor.default_vcpus": "2",
"io.katacontainers.config.hypervisor.default_memory": "12288",
Expand All @@ -370,7 +369,7 @@ func doTestPodVMwithAnnotationsInvalidInstanceType(t *testing.T, assert CloudAss
namespace := envconf.RandomName("default", 7)
podName := "annotations-invalid-instance-type"
containerName := "busybox"
imageName := "busybox:latest"
imageName := BUSYBOX_IMAGE
expectedErrorMessage := `requested instance type ("` + expectedType + `") is not part of supported instance types list`
annotationData := map[string]string{
"io.katacontainers.config.hypervisor.machine_type": expectedType,
Expand All @@ -395,7 +394,7 @@ func doTestPodVMwithAnnotationsLargerMemory(t *testing.T, assert CloudAssert) {
namespace := envconf.RandomName("default", 7)
podName := "annotations-too-big-mem"
containerName := "busybox"
imageName := "busybox:latest"
imageName := BUSYBOX_IMAGE
expectedErrorMessage := "failed to get instance type based on vCPU and memory annotations: no instance type found for the given vcpus (2) and memory (18432)"
annotationData := map[string]string{
"io.katacontainers.config.hypervisor.default_vcpus": "2",
Expand All @@ -421,7 +420,7 @@ func doTestPodVMwithAnnotationsLargerCPU(t *testing.T, assert CloudAssert) {
namespace := envconf.RandomName("default", 7)
podName := "annotations-too-big-cpu"
containerName := "busybox"
imageName := "busybox:latest"
imageName := BUSYBOX_IMAGE
expectedErrorMessage := []string{
"no instance type found for the given vcpus (3) and memory (12288)",
"Number of cpus 3 specified in annotation default_vcpus is greater than the number of CPUs 2 on the system",
Expand Down
5 changes: 5 additions & 0 deletions test/e2e/libvirt_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,11 @@ func TestLibvirtCreatePeerPodAndCheckEnvVariableLogsWithImageAndDeployment(t *te
doTestCreatePeerPodAndCheckEnvVariableLogsWithImageAndDeployment(t, assert)
}

// TestLibvirtCreateNginxDeployment runs the shared nginx-deployment e2e
// scenario against the libvirt provider's assertions.
func TestLibvirtCreateNginxDeployment(t *testing.T) {
	doTestNginxDeployement(t, LibvirtAssert{})
}

/*
Failing due to issues will pulling image (ErrImagePull)
func TestLibvirtCreatePeerPodWithLargeImage(t *testing.T) {
Expand Down
Loading

0 comments on commit a293a49

Please sign in to comment.