diff --git a/Makefile b/Makefile index d32da6b72a..af6cac5367 100644 --- a/Makefile +++ b/Makefile @@ -77,6 +77,7 @@ endif # GINKGO_FOCUS ?= GINKGO_SKIP ?= +GINKGO_NODES ?= 1 GINKGO_TIMEOUT ?= 3h E2E_CONF_FILE ?= $(abspath test/e2e/config/vsphere-dev.yaml) INTEGRATION_CONF_FILE ?= $(abspath test/integration/integration-dev.yaml) @@ -84,6 +85,7 @@ E2E_TEMPLATE_DIR := $(abspath test/e2e/data/infrastructure-vsphere/) SKIP_RESOURCE_CLEANUP ?= false USE_EXISTING_CLUSTER ?= false GINKGO_NOCOLOR ?= false +E2E_IPAM_KUBECONFIG ?= # to set multiple ginkgo skip flags, if any ifneq ($(strip $(GINKGO_SKIP)),) @@ -524,12 +526,13 @@ e2e: $(GINKGO) $(KUSTOMIZE) $(KIND) $(GOVC) ## Run e2e tests @echo Contents of $(TOOLS_BIN_DIR): @ls $(TOOLS_BIN_DIR) @echo - time $(GINKGO) -v --trace -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -timeout=$(GINKGO_TIMEOUT) \ + time $(GINKGO) -v --trace -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) --nodes=$(GINKGO_NODES) -timeout=$(GINKGO_TIMEOUT) \ --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" ./test/e2e -- \ --e2e.config="$(E2E_CONF_FILE)" \ --e2e.artifacts-folder="$(ARTIFACTS)" \ --e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ - --e2e.use-existing-cluster="$(USE_EXISTING_CLUSTER)" + --e2e.use-existing-cluster="$(USE_EXISTING_CLUSTER)" \ + --e2e.ipam-kubeconfig="$(E2E_IPAM_KUBECONFIG)" ## -------------------------------------- ## Release diff --git a/hack/e2e.sh b/hack/e2e.sh index 37eb526734..fd79be871b 100755 --- a/hack/e2e.sh +++ b/hack/e2e.sh @@ -25,11 +25,6 @@ REPO_ROOT=$(git rev-parse --show-toplevel) source "${REPO_ROOT}/hack/ensure-kubectl.sh" on_exit() { - # release IPClaim - echo "Releasing IP claims" - kubectl --kubeconfig="${KUBECONFIG}" delete "ipaddressclaim.ipam.cluster.x-k8s.io" "${CONTROL_PLANE_IPCLAIM_NAME}" || true - kubectl --kubeconfig="${KUBECONFIG}" delete "ipaddressclaim.ipam.cluster.x-k8s.io" "${WORKLOAD_IPCLAIM_NAME}" || true - # kill the VPN docker kill vpn @@ -62,6 +57,12 @@ export ARTIFACTS="${ARTIFACTS:-${REPO_ROOT}/_artifacts}" export DOCKER_IMAGE_TAR="/tmp/images/image.tar" export GC_KIND="false" +# Make tests run in-parallel +export GINKGO_NODES=5 +# Set the kubeconfig to the IPAM cluster so the e2e tests can claim ip addresses +# for kube-vip. +export E2E_IPAM_KUBECONFIG="/root/ipam-conf/capv-services.conf" + # Run the vpn client in container docker run --rm -d --name vpn -v "${HOME}/.openvpn/:${HOME}/.openvpn/" \ -w "${HOME}/.openvpn/" --cap-add=NET_ADMIN --net=host --device=/dev/net/tun \ @@ -70,63 +71,28 @@ docker run --rm -d --name vpn -v "${HOME}/.openvpn/:${HOME}/.openvpn/" \ # Tail the vpn logs docker logs vpn -# Sleep to allow vpn container to start running -sleep 30 - - -function kubectl_get_jsonpath() { - local OBJECT_KIND="${1}" - local OBJECT_NAME="${2}" - local JSON_PATH="${3}" +# Wait until the VPN connection is active and we are able to reach the ipam cluster +function wait_for_ipam_reachable() { local n=0 until [ $n -ge 30 ]; do - OUTPUT=$(kubectl --kubeconfig="${KUBECONFIG}" get "${OBJECT_KIND}.ipam.cluster.x-k8s.io" "${OBJECT_NAME}" -o=jsonpath="${JSON_PATH}") - if [[ "${OUTPUT}" != "" ]]; then + kubectl --kubeconfig="${E2E_IPAM_KUBECONFIG}" --request-timeout=2s cluster-info && RET=$? || RET=$? 
+ if [[ "$RET" -eq 0 ]]; then break fi n=$((n + 1)) sleep 1 done - - if [[ "${OUTPUT}" == "" ]]; then - echo "Received empty output getting ${JSON_PATH} from ${OBJECT_KIND}/${OBJECT_NAME}" 1>&2 - return 1 - else - echo "${OUTPUT}" - return 0 - fi } - -function claim_ip() { - IPCLAIM_NAME="$1" - export IPCLAIM_NAME - envsubst < "${REPO_ROOT}/hack/ipclaim-template.yaml" | kubectl --kubeconfig="${KUBECONFIG}" create -f - 1>&2 - IPADDRESS_NAME=$(kubectl_get_jsonpath ipaddressclaim "${IPCLAIM_NAME}" '{@.status.addressRef.name}') - kubectl --kubeconfig="${KUBECONFIG}" get "ipaddresses.ipam.cluster.x-k8s.io" "${IPADDRESS_NAME}" -o=jsonpath='{@.spec.address}' -} - -export KUBECONFIG="/root/ipam-conf/capv-services.conf" +wait_for_ipam_reachable make envsubst -# Retrieve an IP to be used as the kube-vip IP -CONTROL_PLANE_IPCLAIM_NAME="ip-claim-$(openssl rand -hex 20)" -CONTROL_PLANE_ENDPOINT_IP=$(claim_ip "${CONTROL_PLANE_IPCLAIM_NAME}") -export CONTROL_PLANE_ENDPOINT_IP -echo "Acquired Control Plane IP: $CONTROL_PLANE_ENDPOINT_IP" - -# Retrieve an IP to be used for the workload cluster in v1a3/v1a4 -> v1b1 upgrade tests -WORKLOAD_IPCLAIM_NAME="workload-ip-claim-$(openssl rand -hex 20)" -WORKLOAD_CONTROL_PLANE_ENDPOINT_IP=$(claim_ip "${WORKLOAD_IPCLAIM_NAME}") -export WORKLOAD_CONTROL_PLANE_ENDPOINT_IP -echo "Acquired Workload Cluster Control Plane IP: $WORKLOAD_CONTROL_PLANE_ENDPOINT_IP" - -# save the docker image locally +# Save the docker image locally make e2e-image mkdir -p /tmp/images docker save gcr.io/k8s-staging-cluster-api/capv-manager:e2e -o "$DOCKER_IMAGE_TAR" -# store the image on gcs +# Store the image on gcs login E2E_IMAGE_SHA=$(docker inspect --format='{{index .Id}}' gcr.io/k8s-staging-cluster-api/capv-manager:e2e) export E2E_IMAGE_SHA diff --git a/test/e2e/README.md b/test/e2e/README.md index 06591600f0..ed897de0db 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -20,30 +20,31 @@ In order to run the e2e tests the following requirements must be met: The first step to running the e2e tests is setting up the required environment variables: -| Environment variable | Description | Example | -|------------------------------|-------------------------------------------------------------------------------------|----------------------------------------------------------------------------------| -| `VSPHERE_SERVER` | The IP address or FQDN of a vCenter 6.7u3 server | `my.vcenter.com` | -| `VSPHERE_USERNAME` | The username used to access the vSphere server | `my-username` | -| `VSPHERE_PASSWORD` | The password used to access the vSphere server | `my-password` | -| `VSPHERE_DATACENTER` | The unique name or inventory path of the datacenter in which VMs will be created | `my-datacenter` or `/my-datacenter` | -| `VSPHERE_FOLDER` | The unique name or inventory path of the folder in which VMs will be created | `my-folder` or `/my-datacenter/vm/my-folder` | -| `VSPHERE_RESOURCE_POOL` | The unique name or inventory path of the resource pool in which VMs will be created | `my-resource-pool` or `/my-datacenter/host/Cluster-1/Resources/my-resource-pool` | -| `VSPHERE_DATASTORE` | The unique name or inventory path of the datastore in which VMs will be created | `my-datastore` or `/my-datacenter/datstore/my-datastore` | -| `VSPHERE_NETWORK` | The unique name or inventory path of the network to which VMs will be connected | `my-network` or `/my-datacenter/network/my-network` | -| `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` | 
-| `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` | -| `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` | -| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint | `10.10.123.100` | -| `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` | +| Environment variable | Description | Example | +|------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------| +| `VSPHERE_SERVER` | The IP address or FQDN of a vCenter 6.7u3 server | `my.vcenter.com` | +| `VSPHERE_USERNAME` | The username used to access the vSphere server | `my-username` | +| `VSPHERE_PASSWORD` | The password used to access the vSphere server | `my-password` | +| `VSPHERE_DATACENTER` | The unique name or inventory path of the datacenter in which VMs will be created | `my-datacenter` or `/my-datacenter` | +| `VSPHERE_FOLDER` | The unique name or inventory path of the folder in which VMs will be created | `my-folder` or `/my-datacenter/vm/my-folder` | +| `VSPHERE_RESOURCE_POOL` | The unique name or inventory path of the resource pool in which VMs will be created | `my-resource-pool` or `/my-datacenter/host/Cluster-1/Resources/my-resource-pool` | +| `VSPHERE_DATASTORE` | The unique name or inventory path of the datastore in which VMs will be created | `my-datastore` or `/my-datacenter/datstore/my-datastore` | +| `VSPHERE_NETWORK` | The unique name or inventory path of the network to which VMs will be connected | `my-network` or `/my-datacenter/network/my-network` | +| `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` | +| `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` | +| `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` | +| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint. It will not be used if `E2E_IPAM_KUBECONFIG` is set. | `10.10.123.100` | +| `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` | ### Flags -| Flag | Description | Default Value | -|-------------------------|----------------------------------------------------------------------------------------------------------|---------------| -| `SKIP_RESOURCE_CLEANUP` | This flags skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster | `false` | -| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` | -| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` | -| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. 
| `""` | +| Flag | Description | Default Value | +|-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| `SKIP_RESOURCE_CLEANUP` | This flags skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster | `false` | +| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` | +| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` | +| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. | `""` | +| `E2E_IPAM_KUBECONFIG` | This flag points to a kubeconfig where the in-cluster IPAM provider is running to dynamically claim IP addresses for tests. If this is set, the environment variable `CONTROL_PLANE_ENDPOINT_IP` gets ignored. | `""` | ### Running the e2e tests diff --git a/test/e2e/anti_affinity_test.go b/test/e2e/anti_affinity_test.go index aedf2b68ca..ac78fe809b 100644 --- a/test/e2e/anti_affinity_test.go +++ b/test/e2e/anti_affinity_test.go @@ -34,6 +34,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/clustermodules" + . "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/helper" + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) type AntiAffinitySpecInput struct { @@ -46,6 +48,17 @@ type AntiAffinitySpecInput struct { var _ = Describe("Cluster creation with anti affined nodes", func() { var namespace *corev1.Namespace + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + BeforeEach(func() { Expect(bootstrapClusterProxy).NotTo(BeNil(), "BootstrapClusterProxy can't be nil") namespace = setupSpecNamespace("anti-affinity-e2e") @@ -66,7 +79,7 @@ var _ = Describe("Cluster creation with anti affined nodes", func() { }, Global: GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }, diff --git a/test/e2e/capi_machine_deployment_rollout_test.go b/test/e2e/capi_machine_deployment_rollout_test.go index 3a684c1669..d9bcdb6bd0 100644 --- a/test/e2e/capi_machine_deployment_rollout_test.go +++ b/test/e2e/capi_machine_deployment_rollout_test.go @@ -18,15 +18,29 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("ClusterAPI Machine Deployment Tests", func() { Context("Running the MachineDeployment rollout spec", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.MachineDeploymentRolloutSpec(ctx, func() capi_e2e.MachineDeploymentRolloutSpecInput { return capi_e2e.MachineDeploymentRolloutSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go index 71b819fff8..b4f9fa4731 100644 --- a/test/e2e/cluster_upgrade_test.go +++ b/test/e2e/cluster_upgrade_test.go @@ -18,15 +18,29 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" "k8s.io/utils/ptr" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("When upgrading a workload cluster using ClusterClass [ClusterClass]", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.ClusterUpgradeConformanceSpec(ctx, func() capi_e2e.ClusterUpgradeConformanceSpecInput { return capi_e2e.ClusterUpgradeConformanceSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/clusterclass_changes_test.go b/test/e2e/clusterclass_changes_test.go index 4527bffe0a..be0bba2976 100644 --- a/test/e2e/clusterclass_changes_test.go +++ b/test/e2e/clusterclass_changes_test.go @@ -18,14 +18,28 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" capie2e "sigs.k8s.io/cluster-api/test/e2e" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("When testing ClusterClass changes [ClusterClass]", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capie2e.ClusterClassChangesSpec(ctx, func() capie2e.ClusterClassChangesSpecInput { return capie2e.ClusterClassChangesSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index 41c8442caa..1db4d14276 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -18,14 +18,28 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9=>current, CAPI 1.6=>1.6) [ClusterClass]", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath, "WORKLOAD_CONTROL_PLANE_ENDPOINT_IP") + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { return capi_e2e.ClusterctlUpgradeSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, @@ -46,10 +60,21 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9= }) var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8=>current, CAPI 1.5=>1.6) [ClusterClass]", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath, "WORKLOAD_CONTROL_PLANE_ENDPOINT_IP") + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { return capi_e2e.ClusterctlUpgradeSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/common.go b/test/e2e/common.go index 72af198dbe..deea2df659 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -18,10 +18,8 @@ limitations under the License. package e2e import ( - "fmt" "path/filepath" - . 
"github.com/onsi/ginkgo/v2" "github.com/vmware/govmomi" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/vapi/rest" @@ -34,10 +32,6 @@ const ( KubernetesVersion = "KUBERNETES_VERSION" ) -func Byf(format string, a ...interface{}) { - By(fmt.Sprintf(format, a...)) -} - type InfraClients struct { Client *govmomi.Client RestClient *rest.Client diff --git a/test/e2e/dhcp_overrides_test.go b/test/e2e/dhcp_overrides_test.go index 3111eb1819..ed615f5ef5 100644 --- a/test/e2e/dhcp_overrides_test.go +++ b/test/e2e/dhcp_overrides_test.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/cluster-api/util" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) type GuestInfoMetadata struct { @@ -52,6 +53,17 @@ type DHCPOverrides struct { var _ = Describe("DHCPOverrides configuration test", func() { When("Creating a cluster with DHCPOverrides configured", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + const specName = "dhcp-overrides" var namespace *corev1.Namespace @@ -72,7 +84,7 @@ var _ = Describe("DHCPOverrides configuration test", func() { ClusterProxy: bootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, Flavor: "dhcp-overrides", diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index b36aa445ea..260f1c304c 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -38,6 +38,8 @@ import ( ctrl "sigs.k8s.io/controller-runtime" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + . "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/helper" + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" vsphereframework "sigs.k8s.io/cluster-api-provider-vsphere/test/framework" ) @@ -84,6 +86,13 @@ var ( bootstrapClusterProxy framework.ClusterProxy namespaces map[*corev1.Namespace]context.CancelFunc + + // e2eIPAMKubeconfig is a kubeconfig to a cluster which provides IP address management via an in-cluster + // IPAM provider to claim IPs for the control plane IPs of created clusters. + e2eIPAMKubeconfig string + + // ipamHelper is used to claim and cleanup IP addresses used for kubernetes control plane API Servers. 
+ ipamHelper ipam.Helper ) func init() { @@ -92,6 +101,7 @@ func init() { flag.BoolVar(&alsoLogToFile, "e2e.also-log-to-file", true, "if true, ginkgo logs are additionally written to the `ginkgo-log.txt` file in the artifacts folder (including timestamps)") flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") + flag.StringVar(&e2eIPAMKubeconfig, "e2e.ipam-kubeconfig", "", "path to the kubeconfig for the IPAM cluster") } func TestE2E(t *testing.T) { @@ -134,8 +144,6 @@ var _ = SynchronizedBeforeSuite(func() []byte { e2eConfig, err = vsphereframework.LoadE2EConfig(ctx, configPath) Expect(err).NotTo(HaveOccurred()) - By("Initializing the vSphere session to ensure credentials are working", initVSphereSession) - Byf("Creating a clusterctl local repository into %q", artifactFolder) clusterctlConfigPath, err = vsphereframework.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactFolder, "repository"), true) Expect(err).NotTo(HaveOccurred()) @@ -146,29 +154,51 @@ var _ = SynchronizedBeforeSuite(func() []byte { By("Initializing the bootstrap cluster") vsphereframework.InitBootstrapCluster(ctx, bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder) - namespaces = map[*corev1.Namespace]context.CancelFunc{} + + ipamLabels := ipam.GetIPAddressClaimLabels() + var ipamLabelsRaw []string + for k, v := range ipamLabels { + ipamLabelsRaw = append(ipamLabelsRaw, fmt.Sprintf("%s=%s", k, v)) + } + return []byte( strings.Join([]string{ artifactFolder, configPath, clusterctlConfigPath, bootstrapClusterProxy.GetKubeconfigPath(), + strings.Join(ipamLabelsRaw, ";"), }, ","), ) }, func(data []byte) { // Before each ParallelNode. parts := strings.Split(string(data), ",") - Expect(parts).To(HaveLen(4)) + Expect(parts).To(HaveLen(5)) artifactFolder = parts[0] configPath = parts[1] clusterctlConfigPath = parts[2] kubeconfigPath := parts[3] + ipamLabelsRaw := parts[4] + + namespaces = map[*corev1.Namespace]context.CancelFunc{} + + By("Initializing the vSphere session to ensure credentials are working", initVSphereSession) var err error e2eConfig, err = vsphereframework.LoadE2EConfig(ctx, configPath) Expect(err).NotTo(HaveOccurred()) bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(), framework.WithMachineLogCollector(LogCollector{})) + + ipamLabels := map[string]string{} + for _, s := range strings.Split(ipamLabelsRaw, ";") { + splittedLabel := strings.Split(s, "=") + Expect(splittedLabel).To(HaveLen(2)) + + ipamLabels[splittedLabel[0]] = splittedLabel[1] + } + ipamHelper, err = ipam.New(e2eIPAMKubeconfig, ipamLabels, skipCleanup) + Expect(err).ToNot(HaveOccurred()) }) // Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads). @@ -178,10 +208,16 @@ var _ = SynchronizedAfterSuite(func() { // After each ParallelNode. }, func() { // After all ParallelNodes. 
+ if !skipCleanup { + By("Cleaning up orphaned IPAddressClaims") + vSphereFolderName, err := getClusterctlConfigVariable(clusterctlConfigPath, "VSPHERE_FOLDER") + Expect(err).ToNot(HaveOccurred()) + Expect(ipamHelper.Teardown(ctx, vSphereFolderName, vsphereClient)).To(Succeed()) + } By("Cleaning up the vSphere session", terminateVSphereSession) - By("Tearing down the management cluster") if !skipCleanup { + By("Tearing down the management cluster") vsphereframework.TearDown(ctx, bootstrapClusterProvider, bootstrapClusterProxy) } }) diff --git a/test/e2e/govmomi_test.go b/test/e2e/govmomi_test.go index 10e0e60ac3..0dedcf335a 100644 --- a/test/e2e/govmomi_test.go +++ b/test/e2e/govmomi_test.go @@ -20,6 +20,7 @@ import ( "flag" "net/url" "os" + "path/filepath" "time" . "github.com/onsi/ginkgo/v2" @@ -31,6 +32,7 @@ import ( "github.com/vmware/govmomi/vapi/rest" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/soap" + "sigs.k8s.io/yaml" ) var ( @@ -92,3 +94,26 @@ func initVSphereSession() { func terminateVSphereSession() { Expect(vsphereClient.Logout(ctx)).To(Succeed()) } + +func getClusterctlConfigVariable(clusterctlConfigPath, configVariable string) (string, error) { + // Environment variable overwrite takes priority. + if val := os.Getenv(configVariable); val != "" { + return val, nil + } + + data, err := os.ReadFile(filepath.Clean(clusterctlConfigPath)) + if err != nil { + return "", err + } + + type clusterctlConfig struct { + Variables map[string]string `json:"variables"` + } + + config := clusterctlConfig{} + if err := yaml.Unmarshal(data, &config); err != nil { + return "", err + } + + return config.Variables[configVariable], nil +} diff --git a/test/e2e/gpu_pci_passthrough_test.go b/test/e2e/gpu_pci_passthrough_test.go index 2d730860b1..6e98980211 100644 --- a/test/e2e/gpu_pci_passthrough_test.go +++ b/test/e2e/gpu_pci_passthrough_test.go @@ -29,6 +29,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework/clusterctl" capiutil "sigs.k8s.io/cluster-api/util" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("Cluster creation with GPU devices as PCI passthrough [specialized-infra]", func() { @@ -36,6 +38,17 @@ var _ = Describe("Cluster creation with GPU devices as PCI passthrough [speciali namespace *corev1.Namespace ) + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + BeforeEach(func() { Expect(bootstrapClusterProxy).NotTo(BeNil(), "BootstrapClusterProxy can't be nil") namespace = setupSpecNamespace("gpu-pci") @@ -48,7 +61,7 @@ var _ = Describe("Cluster creation with GPU devices as PCI passthrough [speciali ClusterProxy: bootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, Flavor: "pci", diff --git a/test/e2e/hardware_upgrade_test.go b/test/e2e/hardware_upgrade_test.go index e1c4f7d140..ff5baf7f66 100644 --- a/test/e2e/hardware_upgrade_test.go +++ 
b/test/e2e/hardware_upgrade_test.go @@ -30,6 +30,8 @@ import ( capiutil "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" + . "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/helper" + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) type HardwareUpgradeSpecInput struct { @@ -45,6 +47,17 @@ var _ = Describe("Hardware version upgrade", func() { namespace *corev1.Namespace ) + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + BeforeEach(func() { Expect(bootstrapClusterProxy).NotTo(BeNil(), "BootstrapClusterProxy can't be nil") namespace = setupSpecNamespace("hw-upgrade-e2e") @@ -68,7 +81,7 @@ var _ = Describe("Hardware version upgrade", func() { }, Global: GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }, diff --git a/test/e2e/helper/helper.go b/test/e2e/helper/helper.go new file mode 100644 index 0000000000..6396096a53 --- /dev/null +++ b/test/e2e/helper/helper.go @@ -0,0 +1,29 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package helper provides helper functions. +package helper + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2" +) + +// Byf is By but with fmt.Sprintf. +func Byf(format string, a ...interface{}) { + ginkgo.By(fmt.Sprintf(format, a...)) +} diff --git a/test/e2e/ipam/ipamhelper.go b/test/e2e/ipam/ipamhelper.go new file mode 100644 index 0000000000..674891485f --- /dev/null +++ b/test/e2e/ipam/ipamhelper.go @@ -0,0 +1,394 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ipam is a helper to claim ip addresses from an IPAM provider cluster. +package ipam + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/vim25/mo" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + . "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/helper" +) + +var ipamScheme *runtime.Scheme + +const controlPlaneEndpointVariable = "CONTROL_PLANE_ENDPOINT_IP" + +func init() { + ipamScheme = runtime.NewScheme() + _ = ipamv1.AddToScheme(ipamScheme) +} + +type IPAddressClaims []*ipamv1.IPAddressClaim + +type Helper interface { + // ClaimIPs claims IP addresses with the variable name `CONTROL_PLANE_ENDPOINT_IP` and whatever is passed as + // additionalIPVariableNames and creates a new clusterctl config file. + // It returns the path to the new clusterctl config file and a slice of IPAddressClaims. + ClaimIPs(ctx context.Context, clusterctlConfigPath string, additionalIPVariableNames ...string) (localClusterctlConfigFile string, claims IPAddressClaims) + + // Cleanup deletes the given IPAddressClaims. + Cleanup(ctx context.Context, claims IPAddressClaims) error + + // Teardown tries to cleanup orphaned IPAddressClaims by checking if the corresponding IPs are still in use in vSphere. + // It identifies IPAddressClaims via labels. + Teardown(ctx context.Context, folderName string, vSphereClient *govmomi.Client) error +} + +// New returns an ipam.Helper. If e2eIPAMKubeconfig is an empty string or skipCleanup is true +// it will return a noop helper which does nothing so we can fallback on setting environment variables. +func New(e2eIPAMKubeconfig string, labels map[string]string, skipCleanup bool) (Helper, error) { + if len(labels) == 0 { + return nil, fmt.Errorf("expecting labels to be set to prevent deletion of other IPAddressClaims") + } + + if e2eIPAMKubeconfig == "" { + return &noopHelper{}, nil + } + + ipamClient, err := getClient(e2eIPAMKubeconfig) + if err != nil { + return nil, err + } + + return &helper{ + labels: labels, + client: ipamClient, + skipCleanup: skipCleanup, + }, nil +} + +type helper struct { + client client.Client + labels map[string]string + skipCleanup bool +} + +func (h *helper) ClaimIPs(ctx context.Context, clusterctlConfigPath string, additionalIPVariableNames ...string) (string, IPAddressClaims) { + variables := map[string]string{} + + ipAddressClaims := []*ipamv1.IPAddressClaim{} + + // Claim an IP per variable. + for _, variable := range append(additionalIPVariableNames, controlPlaneEndpointVariable) { + ip, ipAddressClaim, err := h.claimIPAddress(ctx) + Expect(err).ToNot(HaveOccurred()) + ipAddressClaims = append(ipAddressClaims, ipAddressClaim) + Byf("Setting clusterctl variable %s to %s", variable, ip) + variables[variable] = ip + } + + // Create a new clusterctl config file based on the passed file and add the new variables for the IPs. 
+ modifiedClusterctlConfigPath := fmt.Sprintf("%s-%s.yaml", strings.TrimSuffix(clusterctlConfigPath, ".yaml"), rand.String(16)) + Byf("Writing a new clusterctl config to %s", modifiedClusterctlConfigPath) + copyAndAmendClusterctlConfig(ctx, copyAndAmendClusterctlConfigInput{ + ClusterctlConfigPath: clusterctlConfigPath, + OutputPath: modifiedClusterctlConfigPath, + Variables: variables, + }) + + return modifiedClusterctlConfigPath, ipAddressClaims +} + +// Cleanup deletes the IPAddressClaims passed. +func (h *helper) Cleanup(ctx context.Context, ipAddressClaims IPAddressClaims) error { + if CurrentSpecReport().Failed() { + By("Skipping cleanup of IPAddressClaims because the tests failed and the IPs could still be in use") + return nil + } + + if h.skipCleanup { + By("Skipping cleanup of IPAddressClaims because skipCleanup is set to true") + return nil + } + + var errList []error + + for _, ipAddressClaim := range ipAddressClaims { + ipAddressClaim := ipAddressClaim + Byf("Deleting IPAddressClaim %s", klog.KObj(ipAddressClaim)) + if err := h.client.Delete(ctx, ipAddressClaim); err != nil { + errList = append(errList, err) + } + } + + if len(errList) > 0 { + return kerrors.NewAggregate(errList) + } + return nil +} + +// GetIPAddressClaimLabels returns a labels map from the prow environment variables +// BUILD_ID and JOB_NAME. If none of both is set it falls back to add a custom random +// label. +func GetIPAddressClaimLabels() map[string]string { + labels := map[string]string{} + if val := os.Getenv("BUILD_ID"); val != "" { + labels["prow.k8s.io/build-id"] = val + } + if val := os.Getenv("JOB_NAME"); val != "" { + labels["prow.k8s.io/job"] = val + } + if len(labels) == 0 { + // Adding a custom label so we don't accidentally cleanup other IPAddressClaims + labels["capv-testing/random-uid"] = rand.String(32) + } + return labels +} + +// Teardown lists all IPAddressClaims matching the passed labels and deletes the IPAddressClaim +// if there are no VirtualMachines in vCenter using the IP address. +func (h *helper) Teardown(ctx context.Context, folderName string, vSphereClient *govmomi.Client) error { + if h.skipCleanup { + By("Skipping cleanup of IPAddressClaims because skipCleanup is set to true") + return nil + } + + virtualMachineIPAddresses, err := getVirtualMachineIPAddresses(ctx, folderName, vSphereClient) + if err != nil { + return err + } + // List all IPAddressClaims created matching the labels. + ipAddressClaims := &ipamv1.IPAddressClaimList{} + if err := h.client.List(ctx, ipAddressClaims, + client.MatchingLabels(h.labels), + client.InNamespace(metav1.NamespaceDefault), + ); err != nil { + return err + } + + ipAddressClaimsToDelete := []*ipamv1.IPAddressClaim{} + // Collect errors and skip these ip address claims, but report at the end. + var errList []error + + ip := &ipamv1.IPAddress{} + for _, ipAddressClaim := range ipAddressClaims.Items { + ipAddressClaim := ipAddressClaim + if ipAddressClaim.Status.AddressRef.Name == "" { + continue + } + if err := h.client.Get(ctx, client.ObjectKey{Namespace: ipAddressClaim.GetNamespace(), Name: ipAddressClaim.Status.AddressRef.Name}, ip); err != nil { + // If we are not able to get an IP Address we skip the deletion for it but collect and return the error. + errList = append(errList, errors.Wrapf(err, "getting IPAddress for IPAddressClaim %s", klog.KObj(&ipAddressClaim))) + continue + } + + // Skip deletion if there is still a virtual machine which refers this IP address. 
+ if virtualMachineIPAddresses[ip.Spec.Address] { + continue + } + + ipAddressClaimsToDelete = append(ipAddressClaimsToDelete, &ipAddressClaim) + } + + if err := h.Cleanup(ctx, ipAddressClaimsToDelete); err != nil { + // Group with possible previous errors. + errList = append(errList, err) + } + + if len(errList) > 0 { + return kerrors.NewAggregate(errList) + } + return nil +} + +func getClient(e2eIPAMKubeconfig string) (client.Client, error) { + kubeConfig, err := os.ReadFile(filepath.Clean(e2eIPAMKubeconfig)) + if err != nil { + return nil, err + } + + restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeConfig) + if err != nil { + return nil, err + } + + return client.New(restConfig, client.Options{Scheme: ipamScheme}) +} + +// getVirtualMachineIPAddresses lists all VirtualMachines in the given folder and +// returns a map which contains the IP addresses of all machines. +// If the given folder is not found it will return an error. +func getVirtualMachineIPAddresses(ctx context.Context, folderName string, vSphereClient *govmomi.Client) (map[string]bool, error) { + finder := find.NewFinder(vSphereClient.Client) + + // Find the given folder. + folder, err := finder.FolderOrDefault(ctx, folderName) + if err != nil { + return nil, err + } + + // List all VirtualMachines in the folder. + managedObjects, err := finder.ManagedObjectListChildren(ctx, folder.InventoryPath+"/...", "VirtualMachine") + if err != nil { + return nil, err + } + + var vm mo.VirtualMachine + virtualMachineIPAddresses := map[string]bool{} + + // Iterate over the VirtualMachines, get the `guest.net` property and extract the IP addresses. + for _, mobj := range managedObjects { + // Get guest.net properties for mobj. + if err := vSphereClient.RetrieveOne(ctx, mobj.Object.Reference(), []string{"guest.net"}, &vm); err != nil { + return nil, errors.Wrapf(err, "get properties of VM %s", mobj.Object.Reference()) + } + // Iterate over all nics and add IP addresses to virtualMachineIPAddresses. + for _, nic := range vm.Guest.Net { + if nic.IpConfig == nil { + continue + } + for _, ip := range nic.IpConfig.IpAddress { + virtualMachineIPAddresses[ip.IpAddress] = true + } + } + } + + return virtualMachineIPAddresses, nil +} + +func (h *helper) claimIPAddress(ctx context.Context) (_ string, _ *ipamv1.IPAddressClaim, err error) { + claim := &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ipclaim-" + rand.String(32), + Namespace: metav1.NamespaceDefault, + Labels: h.labels, + }, + Spec: ipamv1.IPAddressClaimSpec{ + PoolRef: corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: "capv-e2e-ippool", + }, + }, + } + + // Create an IPAddressClaim + Byf("Creating IPAddressClaim %s", klog.KObj(claim)) + if err := h.client.Create(ctx, claim); err != nil { + return "", nil, err + } + // Store claim inside the service so the cleanup function knows what to delete. + ip := &ipamv1.IPAddress{} + + var retryError error + // Wait for the IPAddressClaim to refer an IPAddress. 
_ = wait.PollUntilContextTimeout(ctx, time.Second, time.Second*30, true, func(ctx context.Context) (done bool, err error) {
+		if err := h.client.Get(ctx, client.ObjectKeyFromObject(claim), claim); err != nil {
+			retryError = errors.Wrap(err, "getting IPAddressClaim")
+			return false, nil
+		}
+
+		if claim.Status.AddressRef.Name == "" {
+			// Use errors.New here: the named err is nil at this point and errors.Wrap(nil, ...) would return nil,
+			// which would silently swallow the timeout reason.
+			retryError = errors.New("IPAddressClaim.Status.AddressRef.Name is not set")
+			return false, nil
+		}
+
+		if err := h.client.Get(ctx, client.ObjectKey{Namespace: claim.GetNamespace(), Name: claim.Status.AddressRef.Name}, ip); err != nil {
+			retryError = errors.Wrap(err, "getting IPAddress")
+			return false, nil
+		}
+		if ip.Spec.Address == "" {
+			retryError = errors.New("IPAddress.Spec.Address is not set")
+			return false, nil
+		}
+
+		retryError = nil
+		return true, nil
+	})
+	if retryError != nil {
+		return "", nil, retryError
+	}
+
+	return ip.Spec.Address, claim, nil
+}
+
+// Note: Copy-paste from CAPI below.
+
+// copyAndAmendClusterctlConfigInput is the input for copyAndAmendClusterctlConfig.
+type copyAndAmendClusterctlConfigInput struct {
+	ClusterctlConfigPath string
+	OutputPath           string
+	Variables            map[string]string
+}
+
+// copyAndAmendClusterctlConfig copies the clusterctl config from ClusterctlConfigPath to
+// OutputPath and adds the given Variables.
+func copyAndAmendClusterctlConfig(_ context.Context, input copyAndAmendClusterctlConfigInput) {
+	// Read clusterctl config from ClusterctlConfigPath.
+	clusterctlConfigFile := &clusterctlConfig{
+		Path: input.ClusterctlConfigPath,
+	}
+	clusterctlConfigFile.read()
+
+	// Overwrite variables.
+	if clusterctlConfigFile.Values == nil {
+		clusterctlConfigFile.Values = map[string]interface{}{}
+	}
+	for key, value := range input.Variables {
+		clusterctlConfigFile.Values[key] = value
+	}
+
+	// Write clusterctl config to OutputPath.
+	clusterctlConfigFile.Path = input.OutputPath
+	clusterctlConfigFile.write()
+}
+
+type clusterctlConfig struct {
+	Path   string
+	Values map[string]interface{}
+}
+
+// write writes a clusterctl config file to disk.
+func (c *clusterctlConfig) write() {
+	data, err := yaml.Marshal(c.Values)
+	Expect(err).ToNot(HaveOccurred(), "Failed to marshal the clusterctl config file")
+
+	Expect(os.WriteFile(c.Path, data, 0600)).To(Succeed(), "Failed to write the clusterctl config file")
+}
+
+// read reads a clusterctl config file from disk.
+func (c *clusterctlConfig) read() {
+	data, err := os.ReadFile(c.Path)
+	Expect(err).ToNot(HaveOccurred())
+
+	err = yaml.Unmarshal(data, &c.Values)
+	Expect(err).ToNot(HaveOccurred(), "Failed to unmarshal the clusterctl config file")
+}
diff --git a/test/e2e/ipam/noop.go b/test/e2e/ipam/noop.go
new file mode 100644
index 0000000000..508e4aabdd
--- /dev/null
+++ b/test/e2e/ipam/noop.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package ipam is a helper to claim IP addresses from an IPAM provider cluster.
+package ipam
+
+import (
+	"context"
+
+	. 
"github.com/onsi/ginkgo/v2" + "github.com/vmware/govmomi" +) + +type noopHelper struct{} + +func (h *noopHelper) ClaimIPs(_ context.Context, _ string, _ ...string) (string, IPAddressClaims) { + return "", nil +} + +func (h *noopHelper) Cleanup(_ context.Context, _ IPAddressClaims) error { + By("Skipping cleanup of IPAddressClaims because of using ipam.noopHelper") + return nil +} + +func (*noopHelper) Teardown(_ context.Context, _ string, _ *govmomi.Client) error { + By("Skipping teardown of IPAddressClaims because of using ipam.noopHelper") + return nil +} diff --git a/test/e2e/k8s_conformance_test.go b/test/e2e/k8s_conformance_test.go index 6336d44726..7928d67ca6 100644 --- a/test/e2e/k8s_conformance_test.go +++ b/test/e2e/k8s_conformance_test.go @@ -18,14 +18,28 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("When testing K8S conformance [Conformance]", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.K8SConformanceSpec(ctx, func() capi_e2e.K8SConformanceSpecInput { return capi_e2e.K8SConformanceSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/md_scale_test.go b/test/e2e/md_scale_test.go index f11941af72..4e30a3894c 100644 --- a/test/e2e/md_scale_test.go +++ b/test/e2e/md_scale_test.go @@ -18,14 +18,28 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("When testing MachineDeployment scale out/in", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.MachineDeploymentScaleSpec(ctx, func() capi_e2e.MachineDeploymentScaleSpecInput { return capi_e2e.MachineDeploymentScaleSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/multivc_test.go b/test/e2e/multivc_test.go index 617ce78b5d..ede6087438 100644 --- a/test/e2e/multivc_test.go +++ b/test/e2e/multivc_test.go @@ -33,6 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + . 
"sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/helper" vsphereframework "sigs.k8s.io/cluster-api-provider-vsphere/test/framework" ) diff --git a/test/e2e/node_drain_timeout_test.go b/test/e2e/node_drain_timeout_test.go index 30c8039626..e8ad042577 100644 --- a/test/e2e/node_drain_timeout_test.go +++ b/test/e2e/node_drain_timeout_test.go @@ -18,14 +18,28 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("When testing node drain timeout", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.NodeDrainTimeoutSpec(ctx, func() capi_e2e.NodeDrainTimeoutSpecInput { return capi_e2e.NodeDrainTimeoutSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/node_labeling_test.go b/test/e2e/node_labeling_test.go index 97c3d45e54..efc7078819 100644 --- a/test/e2e/node_labeling_test.go +++ b/test/e2e/node_labeling_test.go @@ -27,6 +27,8 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/constants" + . "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/helper" + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) type NodeLabelingSpecInput struct { @@ -40,6 +42,17 @@ var _ = Describe("Label nodes with ESXi host info", func() { namespace *corev1.Namespace ) + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + BeforeEach(func() { Expect(bootstrapClusterProxy).NotTo(BeNil(), "BootstrapClusterProxy can't be nil") namespace = setupSpecNamespace("node-labeling-e2e") @@ -59,7 +72,7 @@ var _ = Describe("Label nodes with ESXi host info", func() { }, Global: GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }, diff --git a/test/e2e/ownerreference_test.go b/test/e2e/ownerreference_test.go index 89af392ebb..88664cb27d 100644 --- a/test/e2e/ownerreference_test.go +++ b/test/e2e/ownerreference_test.go @@ -39,9 +39,21 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("OwnerReference checks with FailureDomains and ClusterIdentity", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + 
}) + // Before running the test create the secret used by the VSphereClusterIdentity to connect to the vCenter. BeforeEach(func() { createVsphereIdentitySecret(ctx, bootstrapClusterProxy) @@ -50,7 +62,7 @@ var _ = Describe("OwnerReference checks with FailureDomains and ClusterIdentity" capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index cf49913240..4368b9aa98 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -18,15 +18,29 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" "k8s.io/utils/ptr" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) var _ = Describe("Cluster Creation using Cluster API quick-start test", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, @@ -35,10 +49,21 @@ var _ = Describe("Cluster Creation using Cluster API quick-start test", func() { }) var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-Blocking] [ClusterClass]", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, @@ -48,10 +73,21 @@ var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-B }) var _ = Describe("Cluster creation with [Ignition] bootstrap [PR-Blocking]", func() { + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: 
artifactFolder, SkipCleanup: skipCleanup, diff --git a/test/e2e/storage_policy_test.go b/test/e2e/storage_policy_test.go index 4875fa1ec7..b551bbcf36 100644 --- a/test/e2e/storage_policy_test.go +++ b/test/e2e/storage_policy_test.go @@ -33,6 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/e2e/ipam" ) type StoragePolicySpecInput struct { @@ -45,6 +46,17 @@ type StoragePolicySpecInput struct { var _ = Describe("Cluster creation with storage policy", func() { var namespace *corev1.Namespace + var ( + testSpecificClusterctlConfigPath string + testSpecificIPAddressClaims ipam.IPAddressClaims + ) + BeforeEach(func() { + testSpecificClusterctlConfigPath, testSpecificIPAddressClaims = ipamHelper.ClaimIPs(ctx, clusterctlConfigPath) + }) + defer AfterEach(func() { + Expect(ipamHelper.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + }) + BeforeEach(func() { Expect(bootstrapClusterProxy).NotTo(BeNil(), "BootstrapClusterProxy can't be nil") namespace = setupSpecNamespace("capv-e2e") @@ -65,7 +77,7 @@ var _ = Describe("Cluster creation with storage policy", func() { }, Global: GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: testSpecificClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }, @@ -86,7 +98,7 @@ func VerifyStoragePolicy(ctx context.Context, input StoragePolicySpecInput) { By("creating a workload cluster") configCluster := defaultConfigCluster(clusterName, namespace.Name, specName, 1, 0, GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: clusterctlConfigPath, + ClusterctlConfigPath: input.Global.ClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }) diff --git a/test/go.mod b/test/go.mod index 43a3f76737..9058344b7b 100644 --- a/test/go.mod +++ b/test/go.mod @@ -27,6 +27,7 @@ require ( sigs.k8s.io/cluster-api/test v0.0.0-00010101000000-000000000000 sigs.k8s.io/controller-runtime v0.16.3 sigs.k8s.io/kind v0.20.0 + sigs.k8s.io/yaml v1.4.0 ) require ( @@ -138,5 +139,4 @@ require ( k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect )
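
Usage note: a minimal sketch of how the variables introduced above are expected to be combined when invoking the suite locally. The kubeconfig path is a placeholder, not a value mandated by this change; GINKGO_NODES=5 mirrors what hack/e2e.sh now sets for CI, and the Makefile default remains 1.

# Run the e2e suite with control plane IPs claimed dynamically from the IPAM
# cluster, spreading specs across five parallel Ginkgo nodes.
export E2E_IPAM_KUBECONFIG="/path/to/ipam-cluster.kubeconfig"   # placeholder path
export GINKGO_NODES=5
make e2e

# Without E2E_IPAM_KUBECONFIG the noop IPAM helper is used, so a static
# control plane endpoint has to be provided via the environment instead.
export CONTROL_PLANE_ENDPOINT_IP="10.10.123.100"
make e2e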
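
Leftover claims on the IPAM cluster can be inspected by the labels the helper applies (prow.k8s.io/build-id and prow.k8s.io/job under prow, or capv-testing/random-uid otherwise). An illustrative kubectl sketch, with <job-name> and <claim-name> as placeholders:

# List IPAddressClaims created for a particular CI job; the helper creates its
# claims in the "default" namespace.
kubectl --kubeconfig="${E2E_IPAM_KUBECONFIG}" get ipaddressclaim.ipam.cluster.x-k8s.io \
  -n default -l prow.k8s.io/job=<job-name>

# Delete a specific leftover claim, mirroring what Teardown does once no
# VirtualMachine in the vSphere folder holds the corresponding address anymore.
kubectl --kubeconfig="${E2E_IPAM_KUBECONFIG}" delete ipaddressclaim.ipam.cluster.x-k8s.io \
  -n default <claim-name>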