From 88054ca267adc35a975e6ac47d87f3aaa8ccf100 Mon Sep 17 00:00:00 2001
From: Magnus Kulke
Date: Tue, 30 Jul 2024 14:53:37 +0200
Subject: [PATCH 1/2] libvirt-e2e: create auth.json file

The auth.json file is not provisioned for the authenticated registry
tests in the libvirt test suite. This adds the file and adjusts the
kustomize configuration to use it.

Signed-off-by: Magnus Kulke
---
 .../provisioner/libvirt/provision_common.go | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/src/cloud-api-adaptor/test/provisioner/libvirt/provision_common.go b/src/cloud-api-adaptor/test/provisioner/libvirt/provision_common.go
index beeecd5d7..d6532d6e9 100644
--- a/src/cloud-api-adaptor/test/provisioner/libvirt/provision_common.go
+++ b/src/cloud-api-adaptor/test/provisioner/libvirt/provision_common.go
@@ -19,6 +19,14 @@ import (
 	"sigs.k8s.io/e2e-framework/pkg/envconf"
 )
 
+const authJSONTemplate string = `{
+	"auths": {
+		"quay.io": {
+			"auth": "%s"
+		}
+	}
+}`
+
 // LibvirtProvisioner implements the CloudProvisioner interface for Libvirt.
 type LibvirtProvisioner struct {
 	conn *libvirt.Connect // Libvirt connection
@@ -337,6 +345,16 @@ func (lio *LibvirtInstallOverlay) Edit(ctx context.Context, cfg *envconf.Config,
 		}
 	}
 
+	if cred := os.Getenv("REGISTRY_CREDENTIAL_ENCODED"); cred != "" {
+		authJSON := fmt.Sprintf(authJSONTemplate, cred)
+		if err := os.WriteFile(filepath.Join(lio.Overlay.ConfigDir, "auth.json"), []byte(authJSON), 0644); err != nil {
+			return err
+		}
+		if err = lio.Overlay.SetKustomizeSecretGeneratorFile("auth-json-secret", "auth.json"); err != nil {
+			return err
+		}
+	}
+
 	if err = lio.Overlay.YamlReload(); err != nil {
 		return err
 	}

From a71257f781066d0ecd019ad0155ad5d4b354b5d6 Mon Sep 17 00:00:00 2001
From: Magnus Kulke
Date: Tue, 30 Jul 2024 16:46:08 +0200
Subject: [PATCH 2/2] e2e-tests: use t.Log instead of fmt/log

Move more `log.` calls to `t.Log` to make e2e log output more readable

Signed-off-by: Magnus Kulke
---
 .../test/e2e/assessment_runner.go | 16 ++++++-------
 .../test/e2e/rolling_update.go    | 23 +++++++++-----------
 2 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/src/cloud-api-adaptor/test/e2e/assessment_runner.go b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
index 03486d043..204e7f29e 100644
--- a/src/cloud-api-adaptor/test/e2e/assessment_runner.go
+++ b/src/cloud-api-adaptor/test/e2e/assessment_runner.go
@@ -276,15 +276,15 @@
 			t.Logf("Expected Pod State: %v", tc.podState)
 			yamlData, err := yaml.Marshal(pod.Status)
 			if err != nil {
-				fmt.Println("Error marshaling pod.Status to YAML: ", err.Error())
+				t.Logf("Error marshaling pod.Status to YAML: %v", err.Error())
 			} else {
 				t.Logf("Current Pod State: %v", string(yamlData))
 			}
 			if pod.Status.Phase == v1.PodRunning {
-				fmt.Printf("Log of the pod %.v \n===================\n", pod.Name)
+				t.Logf("Log of the pod %.v \n===================\n", pod.Name)
 				podLogString, _ := GetPodLog(ctx, client, *pod)
-				fmt.Println(podLogString)
-				fmt.Printf("===================\n")
+				t.Log(podLogString)
+				t.Logf("===================\n")
 			}
 			t.Fatal(err)
 		}
@@ -295,11 +295,11 @@
 			t.Fatal(err)
 		}
 		clusterIP := WaitForClusterIP(t, client, tc.service)
-		log.Printf("webserver service is available on cluster IP: %s", clusterIP)
+		t.Logf("webserver service is available on cluster IP: %s", clusterIP)
 	}
 	if tc.extraPods != nil {
 		for _, extraPod := range tc.extraPods {
-			fmt.Printf("Provision extra pod %s", extraPod.pod.Name)
+			t.Logf("Provision extra pod %s", extraPod.pod.Name)
 			err := ProvisionPod(ctx, client, t, extraPod.pod, extraPod.podState, extraPod.testCommands)
 			if err != nil {
 				t.Fatal(err)
@@ -329,7 +329,7 @@
 			}
 			if podLogString != "" {
 				if strings.Contains(podLogString, tc.expectedPodLogString) {
-					log.Printf("Output Log from Pod: %s", podLogString)
+					t.Logf("Output Log from Pod: %s", podLogString)
 				} else {
 					t.Errorf("Job Created pod with Invalid log")
 				}
@@ -435,7 +435,7 @@
 				t.Fatal(error)
 			}
 		} else if profile != "" {
-			fmt.Printf("PodVM Created with Instance Type %v", profile)
+			t.Logf("PodVM Created with Instance Type %v", profile)
 			if tc.FailReason != "" {
 				var podlist v1.PodList
 				var podLogString string
diff --git a/src/cloud-api-adaptor/test/e2e/rolling_update.go b/src/cloud-api-adaptor/test/e2e/rolling_update.go
index 41756bc2a..6ada27764 100644
--- a/src/cloud-api-adaptor/test/e2e/rolling_update.go
+++ b/src/cloud-api-adaptor/test/e2e/rolling_update.go
@@ -12,7 +12,6 @@ import (
 	"testing"
 	"time"
 
-	log "github.com/sirupsen/logrus"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -158,7 +157,7 @@ func DoTestCaaDaemonsetRollingUpdate(t *testing.T, testEnv env.Environment, asse
 		t.Fatal(err)
 	}
 	clusterIP := WaitForClusterIP(t, client, svc)
-	log.Printf("webserver service is available on cluster IP: %s", clusterIP)
+	t.Logf("webserver service is available on cluster IP: %s", clusterIP)
 
 	// Update verify command
 	verifyPod.Spec.Containers[0].Command = append(
@@ -210,25 +209,25 @@
 	if err = client.Resources().Get(ctx, verifyPodName, E2eNamespace, verifyPod); err != nil {
 		t.Fatal(err)
 	}
-	log.Printf("verify pod status: %s", verifyPod.Status.Phase)
+	t.Logf("verify pod status: %s", verifyPod.Status.Phase)
 	if verifyPod.Status.Phase != v1.PodRunning {
 		clientset, err := kubernetes.NewForConfig(client.RESTConfig())
 		if err != nil {
-			log.Printf("Failed to new client set: %v", err)
+			t.Logf("Failed to new client set: %v", err)
 		} else {
 			req := clientset.CoreV1().Pods(E2eNamespace).GetLogs(verifyPodName, &v1.PodLogOptions{})
 			podLogs, err := req.Stream(ctx)
 			if err != nil {
-				log.Printf("Failed to get pod logs: %v", err)
+				t.Logf("Failed to get pod logs: %v", err)
 			} else {
 				defer podLogs.Close()
 				buf := new(bytes.Buffer)
 				_, err = io.Copy(buf, podLogs)
 				if err != nil {
-					log.Printf("Failed to copy pod logs: %v", err)
+					t.Logf("Failed to copy pod logs: %v", err)
 				} else {
 					podLogString := strings.TrimSpace(buf.String())
-					log.Printf("verify pod logs: \n%s", podLogString)
+					t.Logf("verify pod logs: \n%s", podLogString)
 				}
 			}
 		}
@@ -272,12 +271,12 @@ func waitForCaaDaemonSetUpdated(t *testing.T, client klient.Client, ds *appsv1.D
 	if err := wait.For(conditions.New(client.Resources()).ResourceMatch(ds, func(object k8s.Object) bool {
 		dsObj, ok := object.(*appsv1.DaemonSet)
 		if !ok {
-			log.Printf("Not a DaemonSet object: %v", object)
+			t.Logf("Not a DaemonSet object: %v", object)
 			return false
 		}
 
-		log.Printf("Current CAA DaemonSet UpdatedNumberScheduled: %d", dsObj.Status.UpdatedNumberScheduled)
-		log.Printf("Current CAA DaemonSet NumberAvailable: %d", dsObj.Status.NumberAvailable)
+		t.Logf("Current CAA DaemonSet UpdatedNumberScheduled: %d", dsObj.Status.UpdatedNumberScheduled)
+		t.Logf("Current CAA DaemonSet NumberAvailable: %d", dsObj.Status.NumberAvailable)
 		return dsObj.Status.UpdatedNumberScheduled == rc && dsObj.Status.NumberAvailable == rc
 	}), wait.WithTimeout(WAIT_DEPLOYMENT_AVAILABLE_TIMEOUT)); err != nil {
 		t.Fatal(err)
@@ -288,11 +287,11 @@ func waitForDeploymentAvailable(t *testing.T, client klient.Client, deployment *
 	if err := wait.For(conditions.New(client.Resources()).ResourceMatch(deployment, func(object k8s.Object) bool {
 		deployObj, ok := object.(*appsv1.Deployment)
 		if !ok {
-			log.Printf("Not a Deployment object: %v", object)
+			t.Logf("Not a Deployment object: %v", object)
 			return false
 		}
 
-		log.Printf("Current deployment available replicas: %d", deployObj.Status.AvailableReplicas)
+		t.Logf("Current deployment available replicas: %d", deployObj.Status.AvailableReplicas)
 		return deployObj.Status.AvailableReplicas == rc
 	}), wait.WithTimeout(WAIT_DEPLOYMENT_AVAILABLE_TIMEOUT)); err != nil {
 		t.Fatal(err)
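
Note on patch 1: the generated file follows the standard containers auth.json
layout, in which the "auth" value for a registry is the base64-encoded
"user:password" pair. A minimal sketch of how a value for the
REGISTRY_CREDENTIAL_ENCODED variable read by the provisioner could be produced
and rendered through the same template; the testuser/testpass credentials are
hypothetical and for illustration only.

package main

import (
	"encoding/base64"
	"fmt"
)

// Same template that patch 1 adds to provision_common.go.
const authJSONTemplate string = `{
	"auths": {
		"quay.io": {
			"auth": "%s"
		}
	}
}`

func main() {
	// Hypothetical credentials; in CI the already-encoded value would be
	// supplied via the REGISTRY_CREDENTIAL_ENCODED environment variable.
	user, password := "testuser", "testpass"
	cred := base64.StdEncoding.EncodeToString([]byte(user + ":" + password))

	// Renders the auth.json content that the provisioner writes into the
	// kustomize overlay and registers as the "auth-json-secret" secret.
	fmt.Printf(authJSONTemplate+"\n", cred)
}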
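Note on patch 2: t.Log and t.Logf buffer output per test and attribute it to
the test's name, printing it only for failing tests or under "go test -v",
whereas log.Printf and fmt.Printf write to the process output immediately and
cannot be correlated with a test case. A small sketch of the difference;
TestExample is a hypothetical test, not part of the suite.

package example

import (
	"log"
	"testing"
)

func TestExample(t *testing.T) {
	// Buffered by the testing framework and printed under this test's name
	// (on failure or with -v), so e2e output stays grouped per test case.
	t.Logf("webserver service is available on cluster IP: %s", "10.0.0.1")

	// Written straight to stderr by the default logger; with parallel tests
	// these lines interleave and carry no test attribution.
	log.Printf("this line is not tied to TestExample")
}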