diff --git a/Makefile b/Makefile index ac371d9843..d20a75aafe 100644 --- a/Makefile +++ b/Makefile @@ -83,7 +83,6 @@ GINKGO_TIMEOUT ?= 3h E2E_CONF_FILE ?= $(abspath test/e2e/config/vsphere.yaml) E2E_CONF_OVERRIDE_FILE ?= $(abspath test/e2e/config/config-overrides.yaml) E2E_CAPV_MODE ?= govmomi -E2E_TARGET_TYPE ?= vcenter E2E_IPAM_KUBECONFIG ?= INTEGRATION_CONF_FILE ?= $(abspath test/integration/integration-dev.yaml) E2E_TEMPLATE_DIR := $(abspath test/e2e/data/infrastructure-vsphere/) @@ -585,7 +584,6 @@ e2e: $(GINKGO) $(KUSTOMIZE) $(KIND) $(GOVC) ## Run e2e tests --e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ --e2e.use-existing-cluster="$(USE_EXISTING_CLUSTER)" \ --e2e.capv-mode="$(E2E_CAPV_MODE)" \ - --e2e.target-type="$(E2E_TARGET_TYPE)" \ --e2e.ipam-kubeconfig="$(E2E_IPAM_KUBECONFIG)" ## -------------------------------------- diff --git a/hack/e2e.sh b/hack/e2e.sh index a0c3f18d67..5c945ca079 100755 --- a/hack/e2e.sh +++ b/hack/e2e.sh @@ -49,7 +49,7 @@ on_exit() { do echo "Cleaning up VSPHERE_PASSWORD from file ${file}" sed -i "s/${VSPHERE_PASSWORD}/REDACTED/g" "${file}" - done + done || true # Move all artifacts to the original artifacts location. mv "${ARTIFACTS}"/* "${ORIGINAL_ARTIFACTS}/" fi @@ -76,7 +76,6 @@ export VSPHERE_SSH_PRIVATE_KEY="/root/ssh/.private-key/private-key" export E2E_CONF_FILE="${REPO_ROOT}/test/e2e/config/vsphere.yaml" export E2E_CONF_OVERRIDE_FILE="" export E2E_CAPV_MODE="${CAPV_MODE:-govmomi}" -export E2E_TARGET_TYPE="${TARGET_TYPE:-vmc}" export ARTIFACTS="${ARTIFACTS:-${REPO_ROOT}/_artifacts}" export DOCKER_IMAGE_TAR="/tmp/images/image.tar" export GC_KIND="false" diff --git a/internal/test/helpers/vcsim/model.go b/internal/test/helpers/vcsim/model.go index 1ca370ee64..eadc865f0a 100644 --- a/internal/test/helpers/vcsim/model.go +++ b/internal/test/helpers/vcsim/model.go @@ -24,12 +24,20 @@ const ( // DefaultStoragePolicyName is the name of the default storage policy that exists when starting a new vcsim instance. DefaultStoragePolicyName = "vSAN Default Storage Policy" +) - // DefaultVMTemplateName is the name of the default VM template the vcsim controller adds to new vcsim instance. +var ( + // DefaultVMTemplates are the names of the default VM templates the vcsim controller adds to a new vcsim instance. // Note: There are no default templates when starting a new vcsim instance. // Note: For the sake of testing with vcsim the template doesn't really matter (nor the version of K8s hosted on it) - // so the vcsim controller creates only a VM template with a well-known name. - DefaultVMTemplateName = "ubuntu-2204-kube-vX" + // but we must provide at least the templates that are expected by test cluster classes. + DefaultVMTemplates = []string{ + // NOTE: this list must be kept in sync with templates we are using in cluster classes. + // IMPORTANT: keep this list sorted from oldest to newest. + // TODO: consider if we want to make this extensible via the vCenterSimulator CR. + "ubuntu-2204-kube-v1.28.0", + "ubuntu-2204-kube-v1.29.0", + } ) // DatacenterName provide a function to compute vcsim datacenter names given its index.
diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 6eb994d42b..92b532e2be 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -18,6 +18,8 @@ images: loadBehavior: tryLoad - name: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller-{ARCH}:dev loadBehavior: mustLoad + - name: gcr.io/k8s-staging-capi-vsphere/cluster-api-vcsim-controller-{ARCH}:dev + loadBehavior: mustLoad - name: quay.io/jetstack/cert-manager-cainjector:v1.12.2 loadBehavior: tryLoad - name: quay.io/jetstack/cert-manager-webhook:v1.12.2 @@ -160,6 +162,17 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/clusterclass-quick-start.yaml" - sourcePath: "../data/shared/v1.8/v1beta1_provider/metadata.yaml" + - name: vcsim + type: InfrastructureProvider + versions: + - name: v1.10.99 + # Use manifest from source files + value: ../../../../cluster-api-provider-vsphere/test/infrastructure/vcsim/config/default + contract: v1beta1 + files: + # Add cluster templates + - sourcePath: "../data/shared/main/v1beta1_provider/metadata.yaml" + variables: # Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml KUBERNETES_VERSION: "v1.29.0" diff --git a/test/e2e/e2e_setup_test.go b/test/e2e/e2e_setup_test.go index 6aadc53c67..faa8576867 100644 --- a/test/e2e/e2e_setup_test.go +++ b/test/e2e/e2e_setup_test.go @@ -24,11 +24,13 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" "sigs.k8s.io/yaml" - "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/ip" + vsphereip "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/ip" + vspherevcsim "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/vcsim" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) type setupOptions struct { @@ -63,14 +65,49 @@ func Setup(specName string, f func(testSpecificClusterctlConfigPathGetter func() var ( testSpecificClusterctlConfigPath string - testSpecificIPAddressClaims []types.NamespacedName + testSpecificIPAddressClaims vsphereip.AddressClaims testSpecificVariables map[string]string ) BeforeEach(func() { Byf("Setting up test env for %s", specName) - - Byf("Getting IP for %s", strings.Join(append([]string{"CONTROL_PLANE_ENDPOINT_IP"}, options.additionalIPVariableNames...), ",")) - testSpecificIPAddressClaims, testSpecificVariables = ipAddressManager.ClaimIPs(ctx, ip.WithGateway(options.gatewayIPVariableName), ip.WithIP(options.additionalIPVariableNames...)) + switch testTarget { + case VCenterTestTarget: + Byf("Getting IP for %s", strings.Join(append([]string{"CONTROL_PLANE_ENDPOINT_IP"}, options.additionalIPVariableNames...), ",")) + // get IPs from the in cluster address manager + testSpecificIPAddressClaims, testSpecificVariables = inClusterAddressManager.ClaimIPs(ctx, vsphereip.WithGateway(options.gatewayIPVariableName), vsphereip.WithIP(options.additionalIPVariableNames...)) + case VCSimTestTarget: + Byf("Getting IP for %s", strings.Join(append([]string{vsphereip.ControlPlaneEndpointIPVariable}, options.additionalIPVariableNames...), ",")) + + // get IPs from the vcsim controller + testSpecificIPAddressClaims, testSpecificVariables = vcsimAddressManager.ClaimIPs(ctx, vsphereip.WithIP(options.additionalIPVariableNames...)) + + Byf("Creating a vcsim server for %s", specName) + + // variables for govmomi mode derived from the vCenterSimulator + vCenterSimulator, 
err := vspherevcsim.Get(ctx, bootstrapClusterProxy.GetClient()) + Expect(err).ToNot(HaveOccurred(), "Failed to create VCenterSimulator") + + for k, v := range vCenterSimulator.GovmomiVariables() { + // unset corresponding env variable (that in CI contains VMC data), so we are sure we use the value for vcsim + if strings.HasPrefix(k, "VSPHERE_") { + Expect(os.Unsetenv(k)).To(Succeed()) + } + + testSpecificVariables[k] = v + } + + // variables for govmomi mode derived from envVar.Spec.Cluster + // NOTE: picking Datacenter, Cluster, Datastore that exist by default in vcsim + clusterEnvVarSpec := vcsimv1.ClusterEnvVarSpec{ + Datacenter: ptr.To[int32](0), // DC0 + Cluster: ptr.To[int32](0), // C0 + Datastore: ptr.To[int32](0), // LocalDS_0 + } + + for k, v := range clusterEnvVarSpec.GovmomiVariables() { + testSpecificVariables[k] = v + } + } // Create a new clusterctl config file based on the passed file and add the new variables for the IPs. testSpecificClusterctlConfigPath = fmt.Sprintf("%s-%s.yaml", strings.TrimSuffix(clusterctlConfigPath, ".yaml"), specName) @@ -83,13 +120,20 @@ func Setup(specName string, f func(testSpecificClusterctlConfigPathGetter func() }) defer AfterEach(func() { Byf("Cleaning up test env for %s", specName) - Expect(ipAddressManager.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + switch testTarget { + case VCenterTestTarget: + // cleanup IPs/controlPlaneEndpoint created by the in cluster ipam provider. + Expect(inClusterAddressManager.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + case VCSimTestTarget: + // cleanup IPs/controlPlaneEndpoint created by the vcsim controller manager. + Expect(vcsimAddressManager.Cleanup(ctx, testSpecificIPAddressClaims)).To(Succeed()) + } }) // NOTE: it is required to use a function to pass the testSpecificClusterctlConfigPath value into the test func, // so when the test is executed the func could get the value set into the BeforeEach block above. // If instead we pass the value directly, the test func will get the value at the moment of the initial parsing of - // the Ginkgo node tree, which is an empty string (the BeforeEach block above is not run during initial parsing). + // the Ginkgo node tree, which would be an empty string (the BeforeEach block above is not run during the initial parsing). f(func() string { return testSpecificClusterctlConfigPath }) } diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index d50e7facb7..2d557d7c6f 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -26,6 +26,7 @@ import ( "testing" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -42,6 +43,8 @@ import ( vsphereframework "sigs.k8s.io/cluster-api-provider-vsphere/test/framework" vsphereip "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/ip" vspherelog "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/log" + vspherevcsim "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/vcsim" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) const ( @@ -57,10 +60,7 @@ const ( ) const ( - // VMCTestTarget identify tests targeting VMC infrastructure used for CAPV CI. - VMCTestTarget string = "vmc" - - // VCenterTestTarget identify tests targeting a user provided vCenter instance. + // VCenterTestTarget identifies tests targeting a real vCenter instance, including the VMC infrastructure used for CAPV CI.
VCenterTestTarget string = "vcenter" // VCSimTestTarget identify tests targeting a vcsim instance (instead of a real vCenter). @@ -121,8 +121,11 @@ var ( // IPAM provider to claim IPs for the control plane IPs of created clusters. e2eIPAMKubeconfig string - // ipAddressManager is used to claim and cleanup IP addresses used for kubernetes control plane API Servers. - ipAddressManager vsphereip.AddressManager + // inClusterAddressManager is used to claim and cleanup IP addresses used for kubernetes control plane API Servers. + inClusterAddressManager vsphereip.AddressManager + + // vcsimAddressManager is used to claim and cleanup IP addresses used for kubernetes control plane API Servers. + vcsimAddressManager vsphereip.AddressManager ) type configOverrides struct { @@ -139,7 +142,6 @@ func init() { flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") flag.StringVar(&e2eIPAMKubeconfig, "e2e.ipam-kubeconfig", "", "path to the kubeconfig for the IPAM cluster") flag.StringVar(&testMode, "e2e.capv-mode", GovmomiTestMode, "defines how CAPV should behave during this test, one of govmomi|supervisor") - flag.StringVar(&testTarget, "e2e.target-type", VMCTestTarget, "defines which type of infrastructure this test targets, one of vmc|vcenter|vcsim") } func TestE2E(t *testing.T) { @@ -163,7 +165,37 @@ func TestE2E(t *testing.T) { defer w.Close() } - RunSpecs(t, "capv-e2e") + // fetch the current config + suiteConfig, reporterConfig := GinkgoConfiguration() + + // vcsim tests currently have a couple of limitations: + // - they can't be run together with other tests, because the vcsim controller will interfere with objects + // created by other tests. + // - in order to trick clusterctl to install the vcsim controller, it is defined as another infra provider, + // and thus the tests need to be explicit about using vsphere as the target infra provider, but not all the + // tests allow this option. + // + // In order to deal with this nicely, we detect if we are running a vcsim test or not, and edit the test suite and + // how we set up the test accordingly. + testTarget = VCenterTestTarget + if strings.Contains(strings.Join(suiteConfig.FocusStrings, " "), "\\[vcsim\\]") { + testTarget = VCSimTestTarget + } + + // Automatically skip vcsim tests if not explicitly required. + // NOTE: This avoids having to edit all the job configurations for non-vcsim tests to add skip [vcsim] + if testTarget != VCSimTestTarget { + suiteConfig.SkipStrings = append(suiteConfig.SkipStrings, "\\[vcsim\\]") + } + + report := PreviewSpecs("capv-e2e", suiteConfig, reporterConfig) + for _, s := range report.SpecReports { + if s.State == types.SpecStatePassed { + fmt.Println(s.LeafNodeText, s.ContainerHierarchyTexts) + } + } + + RunSpecs(t, "capv-e2e", suiteConfig, reporterConfig) } // Using a SynchronizedBeforeSuite for controlling how to create resources shared across ParallelNodes (~ginkgo threads). @@ -181,24 +213,9 @@ var _ = SynchronizedBeforeSuite(func() []byte { var err error e2eConfig, err = vsphereframework.LoadE2EConfig(ctx, configPath) Expect(err).NotTo(HaveOccurred()) - if configOverridesPath != "" { - Expect(configOverridesPath).To(BeAnExistingFile(), "Invalid test suite argument.
e2e.config-overrides should be an existing file.") - Byf("Merging with e2e config overrides from %q", configOverridesPath) - configData, err := os.ReadFile(configOverridesPath) //nolint:gosec - Expect(err).ToNot(HaveOccurred(), "Failed to read e2e config overrides") - Expect(configData).ToNot(BeEmpty(), "The e2e config overrides should not be empty") - - configOverrides := &configOverrides{} - Expect(yaml.Unmarshal(configData, configOverrides)).To(Succeed(), "Failed to convert e2e config overrides to yaml") - - for k, v := range configOverrides.Variables { - e2eConfig.Variables[k] = v - } - for k, v := range configOverrides.Intervals { - e2eConfig.Intervals[k] = v - } - } + // Add config overrides + drop vcsim related provider/image if not necessary. + amendE2EConfig() Byf("Creating a clusterctl local repository into %q", artifactFolder) clusterctlConfigPath, err = vsphereframework.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactFolder, "repository"), true) @@ -211,10 +228,17 @@ var _ = SynchronizedBeforeSuite(func() []byte { By("Initializing the bootstrap cluster") vsphereframework.InitBootstrapCluster(ctx, bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder) - ipamLabels := vsphereip.GetIPAddressClaimLabels() - var ipamLabelsRaw []string - for k, v := range ipamLabels { - ipamLabelsRaw = append(ipamLabelsRaw, fmt.Sprintf("%s=%s", k, v)) + if testTarget == VCSimTestTarget { + Byf("Creating a vcsim server") + err := vspherevcsim.Create(ctx, bootstrapClusterProxy.GetClient()) + Expect(err).ToNot(HaveOccurred(), "Failed to create VCenterSimulator") + } + + By("Getting AddressClaim labels") + ipClaimLabels := vsphereip.GetIPAddressClaimLabels() + var ipClaimLabelsRaw []string + for k, v := range ipClaimLabels { + ipClaimLabelsRaw = append(ipClaimLabelsRaw, fmt.Sprintf("%s=%s", k, v)) } return []byte( @@ -223,38 +247,62 @@ var _ = SynchronizedBeforeSuite(func() []byte { configPath, clusterctlConfigPath, bootstrapClusterProxy.GetKubeconfigPath(), - strings.Join(ipamLabelsRaw, ";"), + strings.Join(ipClaimLabelsRaw, ";"), + testTarget, }, ","), ) }, func(data []byte) { // Before each ParallelNode. + parts := strings.Split(string(data), ",") - Expect(parts).To(HaveLen(5)) + Expect(parts).To(HaveLen(6)) artifactFolder = parts[0] configPath = parts[1] clusterctlConfigPath = parts[2] kubeconfigPath := parts[3] - ipamLabelsRaw := parts[4] + ipClaimLabelsRaw := parts[4] + testTarget = parts[5] namespaces = map[*corev1.Namespace]context.CancelFunc{} - By("Initializing the vSphere session to ensure credentials are working", initVSphereSession) + if testTarget == VCenterTestTarget { + // Some of the tests targeting VCenter rely on an additional VSphere session to check test progress; + // such a session is created once, and shared across many tests. + // Some changes will be required to get this working with vcsim, e.g. about how to get the credentials/vCenter info, + // but we are deferring this to future work (if and when necessary). + By("Initializing the vSphere session to ensure credentials are working", initVSphereSession) + } var err error e2eConfig, err = vsphereframework.LoadE2EConfig(ctx, configPath) Expect(err).NotTo(HaveOccurred()) + + // Add config overrides + drop vcsim related provider/image if not necessary.
+ amendE2EConfig() + bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(), framework.WithMachineLogCollector(vspherelog.MachineLogCollector{})) - ipamLabels := map[string]string{} - for _, s := range strings.Split(ipamLabelsRaw, ";") { + ipClaimLabels := map[string]string{} + for _, s := range strings.Split(ipClaimLabelsRaw, ";") { splittedLabel := strings.Split(s, "=") Expect(splittedLabel).To(HaveLen(2)) - ipamLabels[splittedLabel[0]] = splittedLabel[1] + ipClaimLabels[splittedLabel[0]] = splittedLabel[1] + } + + // Setup the address manager for the test target + switch testTarget { + case VCenterTestTarget: + // Create the in cluster address manager + inClusterAddressManager, err = vsphereip.InClusterAddressManager(e2eIPAMKubeconfig, ipClaimLabels, skipCleanup) + Expect(err).ToNot(HaveOccurred()) + + case VCSimTestTarget: + // Create the vcsim address manager + vcsimAddressManager, err = vsphereip.VCSIMAddressManager(bootstrapClusterProxy.GetClient(), ipClaimLabels, skipCleanup) + Expect(err).ToNot(HaveOccurred()) } - ipAddressManager, err = vsphereip.InClusterAddressManager(e2eIPAMKubeconfig, ipamLabels, skipCleanup) - Expect(err).ToNot(HaveOccurred()) }) // Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads). @@ -266,11 +314,25 @@ var _ = SynchronizedAfterSuite(func() { // After all ParallelNodes. if !skipCleanup { By("Cleaning up orphaned IPAddressClaims") - vSphereFolderName := e2eConfig.GetVariable("VSPHERE_FOLDER") - Expect(ipAddressManager.Teardown(ctx, vSphereFolderName, vsphereClient)).To(Succeed()) + switch testTarget { + case VCenterTestTarget: + // Cleanup the in cluster address manager + vSphereFolderName := e2eConfig.GetVariable("VSPHERE_FOLDER") + Expect(inClusterAddressManager.Teardown(ctx, vsphereip.MachineFolder(vSphereFolderName), vsphereip.VSphereClient(vsphereClient))).To(Succeed()) + + case VCSimTestTarget: + // Cleanup the vcsim address manager + Expect(vcsimAddressManager.Teardown(ctx)).To(Succeed()) + + // cleanup the vcsim server + Expect(vspherevcsim.Delete(ctx, bootstrapClusterProxy.GetClient(), skipCleanup)).To(Succeed()) + } + } + + if testTarget == VCenterTestTarget { + By("Cleaning up the vSphere session", terminateVSphereSession) } - By("Cleaning up the vSphere session", terminateVSphereSession) if !skipCleanup { By("Tearing down the management cluster") vsphereframework.TearDown(ctx, bootstrapClusterProvider, bootstrapClusterProxy) @@ -281,9 +343,51 @@ func initScheme() *runtime.Scheme { sc := runtime.NewScheme() framework.TryAddDefaultSchemes(sc) _ = infrav1.AddToScheme(sc) + _ = vcsimv1.AddToScheme(sc) return sc } +func amendE2EConfig() { + // If defined, load configOverrides. + // This can be used e.g. when working with a custom vCenter server for local testing (instead of the one in VMC used in CI). + if configOverridesPath != "" { + Expect(configOverridesPath).To(BeAnExistingFile(), "Invalid test suite argument.
e2e.config-overrides should be an existing file.") + + Byf("Merging with e2e config overrides from %q", configOverridesPath) + configData, err := os.ReadFile(configOverridesPath) //nolint:gosec + Expect(err).ToNot(HaveOccurred(), "Failed to read e2e config overrides") + Expect(configData).ToNot(BeEmpty(), "The e2e config overrides should not be empty") + + configOverrides := &configOverrides{} + Expect(yaml.Unmarshal(configData, configOverrides)).To(Succeed(), "Failed to convert e2e config overrides to yaml") + + for k, v := range configOverrides.Variables { + e2eConfig.Variables[k] = v + } + for k, v := range configOverrides.Intervals { + e2eConfig.Intervals[k] = v + } + } + + if testTarget == VCenterTestTarget { + // In case we are not testing vcsim, then drop the vcsim controller from providers and images. + // This ensures that all the tests not yet allowing to explicitly set vsphere as target infra provider keep working. + for i := range e2eConfig.Providers { + if e2eConfig.Providers[i].Name == "vcsim" { + e2eConfig.Providers = append(e2eConfig.Providers[:i], e2eConfig.Providers[i+1:]...) + break + } + } + + for i := range e2eConfig.Images { + if strings.Contains(e2eConfig.Images[i].Name, "cluster-api-vcsim-controller") { + e2eConfig.Images = append(e2eConfig.Images[:i], e2eConfig.Images[i+1:]...) + break + } + } + } +} + func setupSpecNamespace(specName string) *corev1.Namespace { Byf("Creating a namespace for hosting the %q test spec", specName) namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{ diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index 6444328f28..c4b52f097e 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -68,3 +68,36 @@ var _ = Describe("Cluster creation with [Ignition] bootstrap [PR-Blocking]", fun }) }) }) + +var _ = Describe("Cluster Creation using Cluster API quick-start test on vcsim [vcsim]", func() { + const specName = "quick-start-vcsim" // prefix (quick-start) copied from CAPI + Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: ptr.To("vsphere"), + } + }) + }) +}) + +var _ = Describe("ClusterClass Creation using Cluster API quick-start test on vcsim [vcsim] [ClusterClass]", func() { + const specName = "quick-start-cluster-class-vcsim" // prefix (quick-start) copied from CAPI + Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: ptr.To("topology"), + InfrastructureProvider: ptr.To("vsphere"), + } + }) + }) +}) diff --git a/test/framework/framework.go b/test/framework/framework.go index b248b46fa6..729151c851 100644 --- a/test/framework/framework.go +++ b/test/framework/framework.go @@ -31,6 +31,8 @@ import ( "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) +type ProviderConfig clusterctl.ProviderConfig + // Util functions to interact with the 
clusterctl e2e framework. func LoadE2EConfig(ctx context.Context, configPath string) (*clusterctl.E2EConfig, error) { diff --git a/test/framework/ip/addressmanager.go b/test/framework/ip/addressmanager.go index 3d4b908ffb..15cbf1e1c2 100644 --- a/test/framework/ip/addressmanager.go +++ b/test/framework/ip/addressmanager.go @@ -21,25 +21,26 @@ import ( "context" "github.com/vmware/govmomi" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1" ) -type AddressClaims []types.NamespacedName +var ipamScheme *runtime.Scheme -type AddressManager interface { - // ClaimIPs claims IP addresses with the variable name `CONTROL_PLANE_ENDPOINT_IP` and whatever is passed as - // additionalIPVariableNames. - // It returns a slice of IPAddressClaims namespaced names and corresponding variables. - ClaimIPs(ctx context.Context, opts ...ClaimOption) (claims AddressClaims, variables map[string]string) - - // Cleanup deletes the given IPAddressClaims. - Cleanup(ctx context.Context, claims AddressClaims) error +const ( + ControlPlaneEndpointIPVariable = "CONTROL_PLANE_ENDPOINT_IP" + controlPlaneEndpointPortVariable = "CONTROL_PLANE_ENDPOINT_PORT" +) - // Teardown tries to cleanup orphaned IPAddressClaims by checking if the corresponding IPs are still in use in vSphere. - // It identifies IPAddressClaims via labels. - Teardown(ctx context.Context, folderName string, vSphereClient *govmomi.Client) error +func init() { + ipamScheme = runtime.NewScheme() + _ = ipamv1.AddToScheme(ipamScheme) } +type AddressClaim types.NamespacedName +type AddressClaims []AddressClaim + type claimOptions struct { additionalIPVariableNames []string gatewayIPVariableName string @@ -61,3 +62,41 @@ func WithGateway(variableName string) ClaimOption { o.gatewayIPVariableName = variableName } } + +type teardownOptions struct { + folderName string + vSphereClient *govmomi.Client +} + +// TearDownOption is a configuration option supplied to Teardown. +type TearDownOption func(*teardownOptions) + +// MachineFolder instructs Teardown about where machines are located. +// NOTE: This option applies only to the in cluster address manager. +func MachineFolder(name string) TearDownOption { + return func(o *teardownOptions) { + o.folderName = name + } +} + +// VSphereClient provides Teardown a vCenter client. +// NOTE: This option applies only to the in cluster address manager. +func VSphereClient(c *govmomi.Client) TearDownOption { + return func(o *teardownOptions) { + o.vSphereClient = c + } +} + +type AddressManager interface { + // ClaimIPs claims IP addresses with the variable name `CONTROL_PLANE_ENDPOINT_IP` and whatever is passed as + // additionalIPVariableNames. + // It returns a slice of IPAddressClaims namespaced names and corresponding variables. + ClaimIPs(ctx context.Context, opts ...ClaimOption) (claims AddressClaims, variables map[string]string) + + // Cleanup deletes the given IPAddressClaims. + Cleanup(ctx context.Context, claims AddressClaims) error + + // Teardown tries to cleanup orphaned IPAddressClaims by checking if the corresponding IPs are still in use in vSphere. + // It identifies IPAddressClaims via labels. 
+ Teardown(ctx context.Context, options ...TearDownOption) error +} diff --git a/test/framework/ip/incluster.go b/test/framework/ip/incluster.go index da3339269a..e335b75978 100644 --- a/test/framework/ip/incluster.go +++ b/test/framework/ip/incluster.go @@ -32,7 +32,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/wait" @@ -44,10 +43,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var ipamScheme *runtime.Scheme - -const controlPlaneEndpointVariable = "CONTROL_PLANE_ENDPOINT_IP" - func init() { ipamScheme = runtime.NewScheme() _ = ipamv1.AddToScheme(ipamScheme) @@ -91,20 +86,19 @@ func (h *inCluster) ClaimIPs(ctx context.Context, opts ...ClaimOption) (AddressC } variables := map[string]string{} - ipAddressClaims := AddressClaims{} // Claim an IP per variable. - for _, variable := range append(options.additionalIPVariableNames, controlPlaneEndpointVariable) { + for _, variable := range append(options.additionalIPVariableNames, ControlPlaneEndpointIPVariable) { ip, ipAddressClaim, err := h.claimIPAddress(ctx) Expect(err).ToNot(HaveOccurred()) - ipAddressClaims = append(ipAddressClaims, types.NamespacedName{ + ipAddressClaims = append(ipAddressClaims, AddressClaim{ Namespace: ipAddressClaim.Namespace, Name: ipAddressClaim.Name, }) Byf("Setting clusterctl variable %s to %s", variable, ip.Spec.Address) variables[variable] = ip.Spec.Address - if variable == controlPlaneEndpointVariable && options.gatewayIPVariableName != "" { + if variable == ControlPlaneEndpointIPVariable && options.gatewayIPVariableName != "" { // Set the gateway variable if requested to the gateway of the control plane IP. // This is required in ipam scenarios, otherwise the VMs will not be able to // connect to the public internet to pull images. @@ -169,13 +163,18 @@ func GetIPAddressClaimLabels() map[string]string { // Teardown lists all IPAddressClaims matching the passed labels and deletes the IPAddressClaim // if there are no VirtualMachines in vCenter using the IP address. -func (h *inCluster) Teardown(ctx context.Context, folderName string, vSphereClient *govmomi.Client) error { +func (h *inCluster) Teardown(ctx context.Context, opts ...TearDownOption) error { + options := &teardownOptions{} + for _, o := range opts { + o(options) + } + if h.skipCleanup { By("Skipping cleanup of IPAddressClaims because skipCleanup is set to true") return nil } - virtualMachineIPAddresses, err := getVirtualMachineIPAddresses(ctx, folderName, vSphereClient) + virtualMachineIPAddresses, err := getVirtualMachineIPAddresses(ctx, options.folderName, options.vSphereClient) if err != nil { return err } @@ -188,7 +187,7 @@ func (h *inCluster) Teardown(ctx context.Context, folderName string, vSphereClie return err } - ipAddressClaimsToDelete := []types.NamespacedName{} + ipAddressClaimsToDelete := AddressClaims{} // Collect errors and skip these ip address claims, but report at the end. 
var errList []error @@ -209,7 +208,7 @@ func (h *inCluster) Teardown(ctx context.Context, folderName string, vSphereClie continue } - ipAddressClaimsToDelete = append(ipAddressClaimsToDelete, types.NamespacedName{ + ipAddressClaimsToDelete = append(ipAddressClaimsToDelete, AddressClaim{ Namespace: ipAddressClaim.Namespace, Name: ipAddressClaim.Name, }) diff --git a/test/framework/ip/noop.go b/test/framework/ip/noop.go index ae056f8055..d88dbdb4b6 100644 --- a/test/framework/ip/noop.go +++ b/test/framework/ip/noop.go @@ -20,7 +20,6 @@ import ( "context" . "github.com/onsi/ginkgo/v2" - "github.com/vmware/govmomi" ) var _ AddressManager = &noop{} @@ -36,7 +35,7 @@ func (h *noop) Cleanup(_ context.Context, _ AddressClaims) error { return nil } -func (*noop) Teardown(_ context.Context, _ string, _ *govmomi.Client) error { +func (*noop) Teardown(_ context.Context, _ ...TearDownOption) error { By("Skipping teardown of IPAddressClaims because of using ip.noop") return nil } diff --git a/test/framework/ip/vcsim.go b/test/framework/ip/vcsim.go new file mode 100644 index 0000000000..02a69b8a08 --- /dev/null +++ b/test/framework/ip/vcsim.go @@ -0,0 +1,201 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ip + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" + "sigs.k8s.io/controller-runtime/pkg/client" + + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +var _ AddressManager = &vcsim{} + +type vcsim struct { + labels map[string]string + client client.Client + skipCleanup bool +} + +// VCSIMAddressManager returns an ip.AddressManager implementation that leverage vcsim controller capabilities. +func VCSIMAddressManager(client client.Client, labels map[string]string, skipCleanup bool) (AddressManager, error) { + return &vcsim{ + labels: labels, + client: client, + skipCleanup: skipCleanup, + }, nil +} + +func (h *vcsim) ClaimIPs(ctx context.Context, opts ...ClaimOption) (AddressClaims, map[string]string) { + options := &claimOptions{} + for _, o := range opts { + o(options) + } + + variables := map[string]string{} + ipAddressClaims := AddressClaims{} + + // Claim an IP per variable. 
+ for _, variable := range append(options.additionalIPVariableNames, ControlPlaneEndpointIPVariable) { + ip, port, ipAddressClaim, err := h.claimIPAddress(ctx) + Expect(err).ToNot(HaveOccurred()) + ipAddressClaims = append(ipAddressClaims, AddressClaim{ + Namespace: ipAddressClaim.Namespace, + Name: ipAddressClaim.Name, + }) + Byf("Setting clusterctl variable %s to %s", variable, ip) + variables[variable] = ip + + // All the vcsim controlPlaneEndpoints share the same ip, but have a different port, + // that we need to pass back as an additional variable. + // For the CONTROL_PLANE_ENDPOINT_IP variable, we are using the corresponding CONTROL_PLANE_ENDPOINT_PORT variable; + // for other variable names, we do a best effort replace of the _IP suffix with _PORT, or fail if there is no _IP suffix. + if variable == ControlPlaneEndpointIPVariable { + variables[controlPlaneEndpointPortVariable] = port + } else { + if !strings.HasSuffix(variable, "_IP") { + // we may want to shift to better error management here, but for now this should be enough to point in the right direction + panic(fmt.Sprintf("unable to claim vcsim controlPlaneEndpoint for variable name %s. variable name must end with _IP", variable)) + } + variables[strings.Replace(variable, "_IP", "_PORT", -1)] = port + } + } + + return ipAddressClaims, variables +} + +func (h *vcsim) Cleanup(ctx context.Context, ipAddressClaims AddressClaims) error { + if CurrentSpecReport().Failed() { + By("Skipping cleanup of vcsim ControlPlaneEndpoint because the tests failed and the IPs could still be in use") + return nil + } + + if h.skipCleanup { + By("Skipping cleanup of vcsim ControlPlaneEndpoint because skipCleanup is set to true") + return nil + } + + var errList []error + + for _, ipAddressClaim := range ipAddressClaims { + controlPlaneEndpoint := &vcsimv1.ControlPlaneEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: ipAddressClaim.Name, + Namespace: ipAddressClaim.Namespace, + }, + } + Byf("Deleting vcsim ControlPlaneEndpoint %s", klog.KObj(controlPlaneEndpoint)) + if err := h.client.Delete(ctx, controlPlaneEndpoint); err != nil && !apierrors.IsNotFound(err) { + errList = append(errList, err) + } + } + + if len(errList) > 0 { + return kerrors.NewAggregate(errList) + } + return nil +} + +// Teardown lists all ControlPlaneEndpoints matching the passed labels and deletes them. +func (h *vcsim) Teardown(ctx context.Context, _ ...TearDownOption) error { + if h.skipCleanup { + By("Skipping cleanup of vcsim ControlPlaneEndpoints because skipCleanup is set to true") + return nil + } + + // List all ControlPlaneEndpoints created matching the labels. + controlPlaneEndpoints := &vcsimv1.ControlPlaneEndpointList{} + if err := h.client.List(ctx, controlPlaneEndpoints, + client.MatchingLabels(h.labels), + client.InNamespace(metav1.NamespaceDefault), + ); err != nil { + return err + } + + ipAddressClaimsToDelete := AddressClaims{} + // Collect the ControlPlaneEndpoints to delete; deletion and error aggregation are delegated to Cleanup.
+ for _, controlPlaneEndpoint := range controlPlaneEndpoints.Items { + ipAddressClaimsToDelete = append(ipAddressClaimsToDelete, AddressClaim{ + Namespace: controlPlaneEndpoint.Namespace, + Name: controlPlaneEndpoint.Name, + }) + } + return h.Cleanup(ctx, ipAddressClaimsToDelete) +} + +func (h *vcsim) claimIPAddress(ctx context.Context) (_, _ string, _ *vcsimv1.ControlPlaneEndpoint, err error) { + controlPlaneEndpoint := &vcsimv1.ControlPlaneEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ipclaim-" + rand.String(32), + Namespace: metav1.NamespaceDefault, + Labels: h.labels, + Annotations: map[string]string{}, + }, + Spec: vcsimv1.ControlPlaneEndpointSpec{}, + } + + // Set job name as annotation if environment variable is set. + if val := os.Getenv("JOB_NAME"); val != "" { + controlPlaneEndpoint.ObjectMeta.Annotations["prow.k8s.io/job"] = val + } + + // Create a ControlPlaneEndpoint + Byf("Creating vcsim ControlPlaneEndpoint %s", klog.KObj(controlPlaneEndpoint)) + if err := h.client.Create(ctx, controlPlaneEndpoint); err != nil { + return "", "", nil, err + } + + var retryError error + // Wait for the controlPlaneEndpoint to report an IPAddress. + _ = wait.PollUntilContextTimeout(ctx, time.Second, time.Second*30, true, func(ctx context.Context) (done bool, err error) { + if err := h.client.Get(ctx, client.ObjectKeyFromObject(controlPlaneEndpoint), controlPlaneEndpoint); err != nil { + retryError = errors.Wrap(err, "getting vcsim ControlPlaneEndpoint") + return false, nil + } + + if controlPlaneEndpoint.Status.Host == "" { + retryError = errors.New("vcsim ControlPlaneEndpoint.Status.Host is not set") + return false, nil + } + + retryError = nil + return true, nil + }) + if retryError != nil { + // Try best effort deletion of the unused controlPlaneEndpoint before returning an error. + _ = h.client.Delete(ctx, controlPlaneEndpoint) + return "", "", nil, retryError + } + + return controlPlaneEndpoint.Status.Host, strconv.Itoa(int(controlPlaneEndpoint.Status.Port)), controlPlaneEndpoint, nil +} diff --git a/test/framework/vcsim/server.go b/test/framework/vcsim/server.go new file mode 100644 index 0000000000..4876b861db --- /dev/null +++ b/test/framework/vcsim/server.go @@ -0,0 +1,113 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package vcsim provide helpers for vcsim controller. +package vcsim + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo/v2" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + . 
"sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" + "sigs.k8s.io/controller-runtime/pkg/client" + + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +const vcsimInstanceName = "vcsim-e2e" + +func Create(ctx context.Context, c client.Client) error { + vcsim := &vcsimv1.VCenterSimulator{ + ObjectMeta: metav1.ObjectMeta{ + Name: vcsimInstanceName, + Namespace: metav1.NamespaceDefault, + }, + Spec: vcsimv1.VCenterSimulatorSpec{}, + } + + Byf("Creating vcsim server %s", klog.KObj(vcsim)) + if err := c.Create(ctx, vcsim); err != nil { + return err + } + + if _, err := Get(ctx, c); err != nil { + // Try best effort deletion of the unused VCenterSimulator before returning an error. + _ = Delete(ctx, c, false) + return err + } + return nil +} + +func Get(ctx context.Context, c client.Client) (*vcsimv1.VCenterSimulator, error) { + vcsim := &vcsimv1.VCenterSimulator{ + ObjectMeta: metav1.ObjectMeta{ + Name: vcsimInstanceName, + Namespace: metav1.NamespaceDefault, + }, + Spec: vcsimv1.VCenterSimulatorSpec{}, + } + + var retryError error + // Wait for the Server to report an address. + _ = wait.PollUntilContextTimeout(ctx, time.Second, time.Second*30, true, func(ctx context.Context) (done bool, err error) { + if err := c.Get(ctx, client.ObjectKeyFromObject(vcsim), vcsim); err != nil { + retryError = errors.Wrap(err, "getting VCenterSimulator") + return false, nil + } + + if vcsim.Status.Host == "" { + retryError = errors.New("vcsim VCenterSimulator.Status.Host is not set") + return false, nil + } + + retryError = nil + return true, nil + }) + if retryError != nil { + return nil, retryError + } + return vcsim, nil +} + +func Delete(ctx context.Context, c client.Client, skipCleanup bool) error { + if CurrentSpecReport().Failed() { + By("Skipping cleanup of VCenterSimulator because the tests failed and the instance could still be in use") + return nil + } + + if skipCleanup { + By("Skipping cleanup of VCenterSimulator because skipCleanup is set to true") + return nil + } + + vcsim := &vcsimv1.VCenterSimulator{ + ObjectMeta: metav1.ObjectMeta{ + Name: vcsimInstanceName, + Namespace: metav1.NamespaceDefault, + }, + } + Byf("Deleting VCenterSimulator %s", klog.KObj(vcsim)) + if err := c.Delete(ctx, vcsim); err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil +} diff --git a/test/framework/vmoperator/vmoperator.go b/test/framework/vmoperator/vmoperator.go index 211fd4d0bc..39cfc83225 100644 --- a/test/framework/vmoperator/vmoperator.go +++ b/test/framework/vmoperator/vmoperator.go @@ -117,6 +117,15 @@ type Dependencies struct { UserNamespace UserNamespaceConfig } +func (d *Dependencies) Variables() map[string]string { + return map[string]string{ + "VSPHERE_STORAGE_POLICY": d.VCenterCluster.StoragePolicy, + "VSPHERE_IMAGE_NAME": d.VCenterCluster.ContentLibrary.Item.Name, + "VSPHERE_STORAGE_CLASS": d.UserNamespace.StorageClass, + "VSPHERE_MACHINE_CLASS_NAME": d.UserNamespace.VirtualMachineClass, + } +} + // ReconcileDependencies reconciles dependencies for the vm-operator. // NOTE: This func is idempotent, it creates objects if missing otherwise it uses existing ones // (this will allow e.g. to update images once and re-use for many test run). 
diff --git a/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go b/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go index 167ad24a55..34257b7e3b 100644 --- a/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go +++ b/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go @@ -17,7 +17,12 @@ limitations under the License. package v1alpha1 import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" ) // EnvVarSpec defines the desired state of the EnvVar. @@ -98,3 +103,39 @@ type EnvVarList struct { func init() { objectTypes = append(objectTypes, &EnvVar{}, &EnvVarList{}) } + +func (c *ClusterEnvVarSpec) commonVariables() map[string]string { + return map[string]string{ + "VSPHERE_POWER_OFF_MODE": ptr.Deref(c.PowerOffMode, "trySoft"), + } +} + +// SupervisorVariables returns name/value pairs for a ClusterEnvVarSpec to be used for clusterctl templates when testing supervisor mode. +func (c *ClusterEnvVarSpec) SupervisorVariables() map[string]string { + return c.commonVariables() +} + +// GovmomiVariables returns name/value pairs for a ClusterEnvVarSpec to be used for clusterctl templates when testing govmomi mode. +func (c *ClusterEnvVarSpec) GovmomiVariables() map[string]string { + vars := c.commonVariables() + + datacenter := int(ptr.Deref(c.Datacenter, 0)) + datastore := int(ptr.Deref(c.Datastore, 0)) + cluster := int(ptr.Deref(c.Cluster, 0)) + + // Pick the template for the given Kubernetes version if any, otherwise the template for the latest + // version defined in the model. + template := vcsimhelpers.DefaultVMTemplates[len(vcsimhelpers.DefaultVMTemplates)-1] + if c.KubernetesVersion != nil { + template = fmt.Sprintf("ubuntu-2204-kube-%s", *c.KubernetesVersion) + } + + // NOTE: omitting cluster Name intentionally because E2E tests provide this value in other ways + vars["VSPHERE_DATACENTER"] = vcsimhelpers.DatacenterName(datacenter) + vars["VSPHERE_DATASTORE"] = vcsimhelpers.DatastoreName(datastore) + vars["VSPHERE_FOLDER"] = vcsimhelpers.VMFolderName(datacenter) + vars["VSPHERE_NETWORK"] = vcsimhelpers.NetworkPath(datacenter, vcsimhelpers.DefaultNetworkName) + vars["VSPHERE_RESOURCE_POOL"] = vcsimhelpers.ResourcePoolPath(datacenter, cluster) + vars["VSPHERE_TEMPLATE"] = vcsimhelpers.VMPath(datacenter, template) + return vars +} diff --git a/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go b/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go index 8879f52c88..94f9423b75 100644 --- a/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go +++ b/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go @@ -17,7 +17,12 @@ limitations under the License. 
package v1alpha1 import ( + "fmt" + "net" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" ) const ( @@ -116,3 +121,36 @@ type VCenterSimulatorList struct { func init() { objectTypes = append(objectTypes, &VCenterSimulator{}, &VCenterSimulatorList{}) } + +func (v *VCenterSimulator) commonVariables() map[string]string { + host := v.Status.Host + + // NOTE: best effort reverting back to local host because the assumption is that the vcsim controller pod will be port-forwarded on local host + _, port, err := net.SplitHostPort(host) + if err == nil { + host = net.JoinHostPort("127.0.0.1", port) + } + + return map[string]string{ + "VSPHERE_PASSWORD": v.Status.Password, + "VSPHERE_USERNAME": v.Status.Username, + "VSPHERE_STORAGE_POLICY": vcsimhelpers.DefaultStoragePolicyName, + + // variables to set up govc for working with the vcsim instance. + "GOVC_URL": fmt.Sprintf("https://%s:%s@%s/sdk", v.Status.Username, v.Status.Password, host), + "GOVC_INSECURE": "true", + } +} + +// SupervisorVariables returns name/value pairs for a VCenterSimulator to be used for clusterctl templates when testing supervisor mode. +func (v *VCenterSimulator) SupervisorVariables() map[string]string { + return v.commonVariables() +} + +// GovmomiVariables returns name/value pairs for a VCenterSimulator to be used for clusterctl templates when testing govmomi mode. +func (v *VCenterSimulator) GovmomiVariables() map[string]string { + vars := v.commonVariables() + vars["VSPHERE_SERVER"] = fmt.Sprintf("https://%s", v.Status.Host) + vars["VSPHERE_TLS_THUMBPRINT"] = v.Status.Thumbprint + return vars +} diff --git a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go index 1944bd5139..8c6937477e 100644 --- a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go +++ b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go @@ -92,21 +92,15 @@ func (r *ControlPlaneEndpointReconciler) reconcileNormal(ctx context.Context, co log := ctrl.LoggerFrom(ctx) log.Info("Reconciling VCSim ControlPlaneEndpoint") - // NOTE: The name of the ControlPlaneEndpoint should match the name of the Cluster. - resourceGroup := klog.KObj(controlPlaneEndpoint).String() - // Initialize a listener for the workload cluster. // IMPORTANT: The fact that both the listener and the resourceGroup for a workload cluster have // the same name is used as assumptions in other part of the implementation. - listener, err := r.APIServerMux.InitWorkloadClusterListener(resourceGroup) + listenerName := klog.KObj(controlPlaneEndpoint).String() + listener, err := r.APIServerMux.InitWorkloadClusterListener(listenerName) if err != nil { return errors.Wrapf(err, "failed to init the listener for the control plane endpoint") } - // Create a resource group for all the resources belonging the workload cluster. - // NOTE: We are storing in this resource group all the Kubernetes resources that are expected to exist on the workload cluster (e.g Nodes). 
- r.InMemoryManager.AddResourceGroup(resourceGroup) - controlPlaneEndpoint.Status.Host = r.PodIP // NOTE: we are replacing the listener ip with the pod ip so it will be accessible from other pods as well controlPlaneEndpoint.Status.Port = int32(listener.Port()) @@ -116,18 +110,18 @@ func (r *ControlPlaneEndpointReconciler) reconcileNormal(ctx context.Context, co func (r *ControlPlaneEndpointReconciler) reconcileDelete(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) error { log := ctrl.LoggerFrom(ctx) log.Info("Reconciling delete VCSim ControlPlaneEndpoint") + listenerName := klog.KObj(controlPlaneEndpoint).String() - // NOTE: The name of the ControlPlaneEndpoint should match the name of the Cluster. - resourceGroup := klog.KObj(controlPlaneEndpoint).String() + // Delete the resource group hosting all the cloud resources belonging the workload cluster; + if resourceGroup, err := r.APIServerMux.ResourceGroupByWorkloadCluster(listenerName); err == nil { + r.InMemoryManager.DeleteResourceGroup(resourceGroup) + } // Delete the listener for the workload cluster; - if err := r.APIServerMux.DeleteWorkloadClusterListener(resourceGroup); err != nil { + if err := r.APIServerMux.DeleteWorkloadClusterListener(listenerName); err != nil { return errors.Wrapf(err, "failed to delete the listener for the control plane endpoint") } - // Delete the resource group hosting all the cloud resources belonging the workload cluster; - r.InMemoryManager.DeleteResourceGroup(resourceGroup) - controllerutil.RemoveFinalizer(controlPlaneEndpoint, vcsimv1.ControlPlaneEndpointFinalizer) return nil diff --git a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller_test.go b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller_test.go index 6c48568de5..1e2e10eea4 100644 --- a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller_test.go +++ b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller_test.go @@ -20,7 +20,6 @@ import ( "testing" . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" @@ -86,14 +85,8 @@ func Test_Reconcile_ControlPlaneEndpoint(t *testing.T) { g.Expect(controlPlaneEndpoint.Status.Port).ToNot(BeZero()) // Check manager and server internal status - resourceGroup := klog.KObj(controlPlaneEndpoint).String() - foo := &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - } - g.Expect(workloadClustersManager.GetResourceGroup(resourceGroup).GetClient().Create(ctx, foo)).To(Succeed()) // the operation succeed if the resource group has been created as expected - g.Expect(workloadClustersMux.ListListeners()).To(HaveKey(resourceGroup)) + listenerName := klog.KObj(controlPlaneEndpoint).String() + g.Expect(workloadClustersMux.ListListeners()).To(HaveKey(listenerName)) // PART 2: Should delete a ControlPlaneEndpoint @@ -106,8 +99,4 @@ func Test_Reconcile_ControlPlaneEndpoint(t *testing.T) { }}) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res).To(Equal(ctrl.Result{})) - - // Check manager and server internal status - g.Expect(workloadClustersManager.GetResourceGroup(resourceGroup).GetClient().Create(ctx, foo)).ToNot(Succeed()) // the operation fails if the resource group has been deleted as expected - g.Expect(workloadClustersMux.ListListeners()).ToNot(HaveKey(resourceGroup)) } diff --git a/test/infrastructure/vcsim/controllers/envvar_controller.go b/test/infrastructure/vcsim/controllers/envvar_controller.go index caf42bbcbe..8085480aa6 100644 --- a/test/infrastructure/vcsim/controllers/envvar_controller.go +++ b/test/infrastructure/vcsim/controllers/envvar_controller.go @@ -20,9 +20,7 @@ import ( "context" "crypto/rand" "crypto/rsa" - "fmt" "strconv" - "strings" "sync" "github.com/pkg/errors" @@ -37,7 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) @@ -117,13 +114,20 @@ func (r *EnvVarReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ c } // Handle non-deleted EnvSubst - return ctrl.Result{}, r.reconcileNormal(ctx, envVar, vCenterSimulator, controlPlaneEndpoint) + return r.reconcileNormal(ctx, envVar, vCenterSimulator, controlPlaneEndpoint) } -func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1.EnvVar, vCenterSimulator *vcsimv1.VCenterSimulator, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) error { +func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1.EnvVar, vCenterSimulator *vcsimv1.VCenterSimulator, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) log.Info("Reconciling VCSim EnvVar") + if controlPlaneEndpoint.Status.Host == "" { + return ctrl.Result{Requeue: true}, nil + } + if vCenterSimulator.Status.Host == "" { + return ctrl.Result{Requeue: true}, nil + } + r.lock.Lock() defer r.lock.Unlock() @@ -138,12 +142,12 @@ func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1. 
privateKey, err := generatePrivateKey(bitSize) if err != nil { - return errors.Wrapf(err, "failed to generate private key") + return ctrl.Result{}, errors.Wrapf(err, "failed to generate private key") } publicKeyBytes, err := generatePublicKey(&privateKey.PublicKey) if err != nil { - return errors.Wrapf(err, "failed to generate public key") + return ctrl.Result{}, errors.Wrapf(err, "failed to generate public key") } sshKey = string(publicKeyBytes) @@ -151,12 +155,8 @@ func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1. log.Info("Created ssh authorized key") } - // Common variables (used both in supervisor and govmomi mode) + // Variables required only when the vcsim controller is used in combination with Tilt (E2E tests provide these values in other ways) envVar.Status.Variables = map[string]string{ - // cluster template variables about the vcsim instance. - "VSPHERE_PASSWORD": vCenterSimulator.Status.Password, - "VSPHERE_USERNAME": vCenterSimulator.Status.Username, - // Variables for machines ssh key "VSPHERE_SSH_AUTHORIZED_KEY": sshKey, @@ -170,41 +170,37 @@ func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1. // variables for the fake APIServer endpoint "CONTROL_PLANE_ENDPOINT_IP": controlPlaneEndpoint.Status.Host, "CONTROL_PLANE_ENDPOINT_PORT": strconv.Itoa(int(controlPlaneEndpoint.Status.Port)), - - // variables to set up govc for working with the vcsim instance. - "GOVC_URL": fmt.Sprintf("https://%s:%s@%s/sdk", vCenterSimulator.Status.Username, vCenterSimulator.Status.Password, strings.Replace(vCenterSimulator.Status.Host, r.PodIP, "127.0.0.1", 1)), // NOTE: reverting back to local host because the assumption is that the vcsim pod will be port-forwarded on local host - "GOVC_INSECURE": "true", } // Variables below are generated using the same utilities used both also for E2E tests setup. if r.SupervisorMode { - config := dependenciesForVCenterSimulator(vCenterSimulator) - - // Variables used only in supervisor mode - envVar.Status.Variables["VSPHERE_POWER_OFF_MODE"] = ptr.Deref(envVar.Spec.Cluster.PowerOffMode, "trySoft") + // variables for supervisor mode derived from the vCenterSimulator + for k, v := range vCenterSimulator.SupervisorVariables() { + envVar.Status.Variables[k] = v + } - envVar.Status.Variables["VSPHERE_STORAGE_POLICY"] = config.VCenterCluster.StoragePolicy - envVar.Status.Variables["VSPHERE_IMAGE_NAME"] = config.VCenterCluster.ContentLibrary.Item.Name - envVar.Status.Variables["VSPHERE_STORAGE_CLASS"] = config.UserNamespace.StorageClass - envVar.Status.Variables["VSPHERE_MACHINE_CLASS_NAME"] = config.UserNamespace.VirtualMachineClass + // Variables for supervisor mode derived from how we set up dependencies for the vm-operator + for k, v := range dependenciesForVCenterSimulator(vCenterSimulator).Variables() { + envVar.Status.Variables[k] = v + } - return nil + // variables for supervisor mode derived from envVar.Spec.Cluster + for k, v := range envVar.Spec.Cluster.SupervisorVariables() { + envVar.Status.Variables[k] = v + } + return ctrl.Result{}, nil } - // Variables used only in govmomi mode - - // cluster template variables about the vcsim instance.
- envVar.Status.Variables["VSPHERE_SERVER"] = fmt.Sprintf("https://%s", vCenterSimulator.Status.Host) - envVar.Status.Variables["VSPHERE_TLS_THUMBPRINT"] = vCenterSimulator.Status.Thumbprint - envVar.Status.Variables["VSPHERE_DATACENTER"] = vcsimhelpers.DatacenterName(int(ptr.Deref(envVar.Spec.Cluster.Datacenter, 0))) - envVar.Status.Variables["VSPHERE_DATASTORE"] = vcsimhelpers.DatastoreName(int(ptr.Deref(envVar.Spec.Cluster.Datastore, 0))) - envVar.Status.Variables["VSPHERE_FOLDER"] = fmt.Sprintf("/DC%d/vm", ptr.Deref(envVar.Spec.Cluster.Datacenter, 0)) - envVar.Status.Variables["VSPHERE_NETWORK"] = fmt.Sprintf("/DC%d/network/VM Network", ptr.Deref(envVar.Spec.Cluster.Datacenter, 0)) - envVar.Status.Variables["VSPHERE_RESOURCE_POOL"] = fmt.Sprintf("/DC%d/host/DC%[1]d_C%d/Resources", ptr.Deref(envVar.Spec.Cluster.Datacenter, 0), ptr.Deref(envVar.Spec.Cluster.Cluster, 0)) - envVar.Status.Variables["VSPHERE_STORAGE_POLICY"] = vcsimhelpers.DefaultStoragePolicyName - envVar.Status.Variables["VSPHERE_TEMPLATE"] = fmt.Sprintf("/DC%d/vm/%s", ptr.Deref(envVar.Spec.Cluster.Datacenter, 0), vcsimhelpers.DefaultVMTemplateName) + // variables for govmomi mode derived from the vCenterSimulator + for k, v := range vCenterSimulator.GovmomiVariables() { + envVar.Status.Variables[k] = v + } - return nil + // variables for govmomi mode derived from envVar.Spec.Cluster + for k, v := range envVar.Spec.Cluster.GovmomiVariables() { + envVar.Status.Variables[k] = v + } + return ctrl.Result{}, nil } func (r *EnvVarReconciler) reconcileDelete(_ context.Context, _ *vcsimv1.EnvVar, _ *vcsimv1.VCenterSimulator, _ *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) { diff --git a/test/infrastructure/vcsim/controllers/vcsim_controller.go b/test/infrastructure/vcsim/controllers/vcsim_controller.go index 033c06bf3f..db3ed7a9e5 100644 --- a/test/infrastructure/vcsim/controllers/vcsim_controller.go +++ b/test/infrastructure/vcsim/controllers/vcsim_controller.go @@ -195,10 +195,10 @@ func (r *VCenterSimulatorReconciler) reconcileNormal(ctx context.Context, vCente vCenterSimulator.Status.Username = vcsimInstance.Username() vCenterSimulator.Status.Password = vcsimInstance.Password() - // Add a VM template + // Add a VM templates // Note: for the sake of testing with vcsim the template doesn't really matter (nor the version of K8s hosted on it) - // so we create only a VM template with a well-known name. - if err := createVMTemplate(ctx, vCenterSimulator); err != nil { + // but we must provide at least the templates that are expected by test cluster classes. + if err := createVMTemplates(ctx, vCenterSimulator); err != nil { return err } } @@ -223,7 +223,7 @@ func (r *VCenterSimulatorReconciler) reconcileNormal(ctx context.Context, vCente // - A set of Kubernetes object the vm-operator relies on // To mimic the supervisor cluster, there will be only one vm-operator instance for each management cluster; - // also, the logic below should consider that the instance of the vm-operator is bound to a specific vCenterSimulator cluster/user namespace. + // also, the logic below should consider that the instance of the vm-operator is bound to a specific vCenterSimulator cluster. 
config := dependenciesForVCenterSimulator(vCenterSimulator) if err := vmoperator.ReconcileDependencies(ctx, r.Client, config); err != nil { @@ -243,7 +243,7 @@ func (r *VCenterSimulatorReconciler) reconcileNormal(ctx context.Context, vCente return nil } -func createVMTemplate(ctx context.Context, vCenterSimulator *vcsimv1.VCenterSimulator) error { +func createVMTemplates(ctx context.Context, vCenterSimulator *vcsimv1.VCenterSimulator) error { log := ctrl.LoggerFrom(ctx) govcURL := fmt.Sprintf("https://%s:%s@%s/sdk", vCenterSimulator.Status.Username, vCenterSimulator.Status.Password, vCenterSimulator.Status.Host) @@ -256,17 +256,20 @@ func createVMTemplate(ctx context.Context, vCenterSimulator *vcsimv1.VCenterSimu if vCenterSimulator.Spec.Model != nil { datacenters = int(ptr.Deref(vCenterSimulator.Spec.Model.Datacenter, int32(simulator.VPX().Datacenter))) // VPX is the same base model used when creating vcsim } - for dc := 0; dc < datacenters; dc++ { - exit := cli.Run([]string{"vm.create", fmt.Sprintf("-ds=%s", vcsimhelpers.DatastoreName(datastore)), fmt.Sprintf("-cluster=%s", vcsimhelpers.ClusterName(dc, cluster)), fmt.Sprintf("-net=%s", vcsimhelpers.DefaultNetworkName), "-disk=20G", "-on=false", "-k=true", fmt.Sprintf("-u=%s", govcURL), vcsimhelpers.DefaultVMTemplateName}) - if exit != 0 { - return errors.New("failed to create vm template") - } - exit = cli.Run([]string{"vm.markastemplate", "-k=true", fmt.Sprintf("-u=%s", govcURL), vcsimhelpers.VMPath(dc, vcsimhelpers.DefaultVMTemplateName)}) - if exit != 0 { - return errors.New("failed to mark vm template") + for _, t := range vcsimhelpers.DefaultVMTemplates { + for dc := 0; dc < datacenters; dc++ { + exit := cli.Run([]string{"vm.create", fmt.Sprintf("-ds=%s", vcsimhelpers.DatastoreName(datastore)), fmt.Sprintf("-cluster=%s", vcsimhelpers.ClusterName(dc, cluster)), fmt.Sprintf("-net=%s", vcsimhelpers.DefaultNetworkName), "-disk=20G", "-on=false", "-k=true", fmt.Sprintf("-u=%s", govcURL), t}) + if exit != 0 { + return errors.New("failed to create vm template") + } + + exit = cli.Run([]string{"vm.markastemplate", "-k=true", fmt.Sprintf("-u=%s", govcURL), vcsimhelpers.VMPath(dc, t)}) + if exit != 0 { + return errors.New("failed to mark vm template") + } + log.Info("Created VM template", "name", t) } - log.Info("Created VM template", "name", vcsimhelpers.DefaultVMTemplateName) } return nil } @@ -296,11 +299,12 @@ func dependenciesForVCenterSimulator(vCenterSimulator *vcsimv1.VCenterSimulator) StoragePolicy: vcsimhelpers.DefaultStoragePolicyName, // Those are settings for a fake content library we are going to create given that it doesn't exists in vcsim by default. + // It contains a single dummy image. ContentLibrary: vmoperator.ContentLibraryConfig{ - Name: "kubernetes", + Name: "vcsim", Datastore: vcsimhelpers.DatastorePath(datacenter, datastore), Item: vmoperator.ContentLibraryItemConfig{ - Name: "test-image", + Name: "vcsim-default-image", Files: []vmoperator.ContentLibraryItemFilesConfig{ // TODO: check if we really need both { Name: "ttylinux-pc_i486-16.1.ovf", @@ -318,8 +322,8 @@ func dependenciesForVCenterSimulator(vCenterSimulator *vcsimv1.VCenterSimulator) // in the default namespace and to use the "vcsim-default" storage class. 
UserNamespace: vmoperator.UserNamespaceConfig{ Name: corev1.NamespaceDefault, - StorageClass: "test-storage-class", - VirtualMachineClass: "test-virtual-machine-class", + StorageClass: "vcsim-default-storage-class", + VirtualMachineClass: "vcsim-default-vm-class", }, } return config diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go index 1075492e38..a1e703b597 100644 --- a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go @@ -47,6 +47,7 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/vmoperator" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) type VirtualMachineReconciler struct { @@ -138,8 +139,34 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque ctx = ctrl.LoggerInto(ctx, log) // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() + r.InMemoryManager.AddResourceGroup(resourceGroup) + + if _, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup); err != nil { + l := &vcsimv1.ControlPlaneEndpointList{} + if err := r.Client.List(ctx, l); err != nil { + return ctrl.Result{}, err + } + found := false + for _, c := range l.Items { + c := c + if c.Status.Host != cluster.Spec.ControlPlaneEndpoint.Host || c.Status.Port != cluster.Spec.ControlPlaneEndpoint.Port { + continue + } + + listenerName := klog.KObj(&c).String() + log.Info("Registering ResourceGroup for ControlPlaneEndpoint", "ResourceGroup", resourceGroup, "ControlPlaneEndpoint", listenerName) + err := r.APIServerMux.RegisterResourceGroup(listenerName, resourceGroup) + if err != nil { + return ctrl.Result{}, err + } + found = true + break + } + if !found { + return ctrl.Result{}, errors.Errorf("unable to find a ControlPlaneEndpoint for host %s, port %d", cluster.Spec.ControlPlaneEndpoint.Host, cluster.Spec.ControlPlaneEndpoint.Port) + } + } // Check if there is a conditionsTracker in the resource group. 
// The conditionsTracker is an object stored in memory with the scope of storing conditions used for keeping diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go index c95181aa3d..f2e3c43803 100644 --- a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go @@ -119,12 +119,30 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { err := inmemoryMgr.Start(ctx) g.Expect(err).ToNot(HaveOccurred()) - inmemoryMgr.AddResourceGroup(klog.KObj(cluster).String()) - inmemoryClient := inmemoryMgr.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + resourceGroupName := klog.KObj(cluster).String() + inmemoryMgr.AddResourceGroup(resourceGroupName) + inmemoryClient := inmemoryMgr.GetResourceGroup(resourceGroupName).GetClient() + + host := "127.0.0.1" + apiServerMux, err := inmemoryserver.NewWorkloadClustersMux(inmemoryMgr, host, inmemoryserver.CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: inmemoryserver.DefaultMinPort, + MaxPort: inmemoryserver.DefaultMinPort + 99, + DebugPort: inmemoryserver.DefaultDebugPort, + }) + g.Expect(err).ToNot(HaveOccurred()) + + listenerName := "foo/bar" + _, err = apiServerMux.InitWorkloadClusterListener(listenerName) + g.Expect(err).ToNot(HaveOccurred()) + + err = apiServerMux.RegisterResourceGroup(listenerName, resourceGroupName) + g.Expect(err).ToNot(HaveOccurred()) r := VirtualMachineReconciler{ Client: crclient, InMemoryManager: inmemoryMgr, + APIServerMux: apiServerMux, } // Reconcile @@ -236,8 +254,9 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { err := inmemoryMgr.Start(ctx) g.Expect(err).ToNot(HaveOccurred()) - inmemoryMgr.AddResourceGroup(klog.KObj(cluster).String()) - inmemoryClient := inmemoryMgr.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + resourceGroupName := klog.KObj(cluster).String() + inmemoryMgr.AddResourceGroup(resourceGroupName) + inmemoryClient := inmemoryMgr.GetResourceGroup(resourceGroupName).GetClient() // Start an http server apiServerMux, err := inmemoryserver.NewWorkloadClustersMux(inmemoryMgr, "127.0.0.1", inmemoryserver.CustomPorts{ @@ -248,6 +267,13 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { }) g.Expect(err).ToNot(HaveOccurred()) + listenerName := "foo/bar" + _, err = apiServerMux.InitWorkloadClusterListener(listenerName) + g.Expect(err).ToNot(HaveOccurred()) + + err = apiServerMux.RegisterResourceGroup(listenerName, resourceGroupName) + g.Expect(err).ToNot(HaveOccurred()) + r := VirtualMachineReconciler{ Client: crclient, InMemoryManager: inmemoryMgr, diff --git a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go b/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go index ce5ef85359..3f270e51f5 100644 --- a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go +++ b/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go @@ -225,7 +225,6 @@ func (r *vmBootstrapReconciler) reconcileBoostrapNode(ctx context.Context, clust } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. 
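// From here on, the bootstrap reconciler consistently resolves the workload cluster
// listener from the resource group before talking to the mux: the resource group is
// derived from the Cluster object, the listener from the ControlPlaneEndpoint object,
// and the two are linked once via RegisterResourceGroup. A minimal sketch of the
// round trip, assuming a mux with an already initialized listener (names illustrative):
listenerName := "default/my-control-plane-endpoint" // from the ControlPlaneEndpoint object
resourceGroup := "default/my-cluster"               // from the Cluster object

// Bind the cluster's resource group to the listener serving its control plane endpoint.
if err := apiServerMux.RegisterResourceGroup(listenerName, resourceGroup); err != nil {
	return err
}

// Reconcilers that only know the resource group resolve the listener back
// before adding or removing API servers and etcd members.
name, err := apiServerMux.WorkloadClusterByResourceGroup(resourceGroup)
if err != nil {
	return err
}
_ = name // passed to HasAPIServer / AddEtcdMember etc. instead of the resource group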
resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -299,7 +298,6 @@ func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, clust } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -376,7 +374,11 @@ func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, clust } // If there is not yet an etcd member listener for this machine, add it to the server. - if !r.APIServerMux.HasEtcdMember(resourceGroup, etcdMember) { + listenerName, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup) + if err != nil { + return ctrl.Result{}, err + } + if !r.APIServerMux.HasEtcdMember(listenerName, etcdMember) { // Getting the etcd CA s, err := secret.Get(ctx, r.Client, client.ObjectKeyFromObject(cluster), secret.EtcdCA) if err != nil { @@ -402,7 +404,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, clust return ctrl.Result{}, errors.Wrapf(err, "invalid etcd CA: invalid %s", secret.TLSKeyDataName) } - if err := r.APIServerMux.AddEtcdMember(resourceGroup, etcdMember, cert, key.(*rsa.PrivateKey)); err != nil { + if err := r.APIServerMux.AddEtcdMember(listenerName, etcdMember, cert, key.(*rsa.PrivateKey)); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to start etcd member") } } @@ -440,7 +442,6 @@ func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -480,7 +481,11 @@ func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, } // If there is not yet an API server listener for this machine. - if !r.APIServerMux.HasAPIServer(resourceGroup, apiServer) { + listenerName, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup) + if err != nil { + return ctrl.Result{}, err + } + if !r.APIServerMux.HasAPIServer(listenerName, apiServer) { // Getting the Kubernetes CA s, err := secret.Get(ctx, r.Client, client.ObjectKeyFromObject(cluster), secret.ClusterCA) if err != nil { @@ -508,7 +513,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, // Adding the APIServer. // NOTE: When the first APIServer is added, the workload cluster listener is started. - if err := r.APIServerMux.AddAPIServer(resourceGroup, apiServer, cert, key.(*rsa.PrivateKey)); err != nil { + if err := r.APIServerMux.AddAPIServer(listenerName, apiServer, cert, key.(*rsa.PrivateKey)); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to start API server") } } @@ -533,7 +538,6 @@ func (r *vmBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -581,7 +585,6 @@ func (r *vmBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.C } // Compute the resource group unique name. 
- // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -621,7 +624,6 @@ func (r *vmBootstrapReconciler) reconcileBoostrapKubeadmObjects(ctx context.Cont } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -691,7 +693,6 @@ func (r *vmBootstrapReconciler) reconcileBoostrapKubeProxy(ctx context.Context, // TODO: Add provisioning time for KubeProxy. // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -738,7 +739,6 @@ func (r *vmBootstrapReconciler) reconcileBoostrapCoredns(ctx context.Context, cl // TODO: Add provisioning time for CoreDNS. // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -824,7 +824,6 @@ func (r *vmBootstrapReconciler) reconcileDelete(ctx context.Context, cluster *cl func (r *vmBootstrapReconciler) reconcileDeleteNode(ctx context.Context, cluster *clusterv1.Cluster, _ *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -850,7 +849,6 @@ func (r *vmBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -864,7 +862,12 @@ func (r *vmBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster if err := inmemoryClient.Delete(ctx, etcdPod); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to delete etcd Pod") } - if err := r.APIServerMux.DeleteEtcdMember(resourceGroup, etcdMember); err != nil { + + listenerName, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup) + if err != nil { + return ctrl.Result{}, err + } + if err := r.APIServerMux.DeleteEtcdMember(listenerName, etcdMember); err != nil { return ctrl.Result{}, err } @@ -883,7 +886,6 @@ func (r *vmBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cl } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. 
resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -897,7 +899,12 @@ func (r *vmBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cl if err := inmemoryClient.Delete(ctx, apiServerPod); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to delete apiServer Pod") } - if err := r.APIServerMux.DeleteAPIServer(resourceGroup, apiServer); err != nil { + + listenerName, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup) + if err != nil { + return ctrl.Result{}, err + } + if err := r.APIServerMux.DeleteAPIServer(listenerName, apiServer); err != nil { return ctrl.Result{}, err } @@ -911,7 +918,6 @@ func (r *vmBootstrapReconciler) reconcileDeleteScheduler(ctx context.Context, cl } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -935,7 +941,6 @@ func (r *vmBootstrapReconciler) reconcileDeleteControllerManager(ctx context.Con } // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller.go b/test/infrastructure/vcsim/controllers/vspherevm_controller.go index a2910a1bdd..4572d9a598 100644 --- a/test/infrastructure/vcsim/controllers/vspherevm_controller.go +++ b/test/infrastructure/vcsim/controllers/vspherevm_controller.go @@ -45,6 +45,7 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) // TODO: implement support for CAPV deployed in arbitrary ns (TBD if we need this). @@ -140,8 +141,34 @@ func (r *VSphereVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( ctx = ctrl.LoggerInto(ctx, log) // Compute the resource group unique name. - // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() + r.InMemoryManager.AddResourceGroup(resourceGroup) + + if _, err := r.APIServerMux.WorkloadClusterByResourceGroup(resourceGroup); err != nil { + l := &vcsimv1.ControlPlaneEndpointList{} + if err := r.Client.List(ctx, l); err != nil { + return ctrl.Result{}, err + } + found := false + for _, c := range l.Items { + c := c + if c.Status.Host != cluster.Spec.ControlPlaneEndpoint.Host || c.Status.Port != cluster.Spec.ControlPlaneEndpoint.Port { + continue + } + + listenerName := klog.KObj(&c).String() + log.Info("Registering ResourceGroup for ControlPlaneEndpoint", "ResourceGroup", resourceGroup, "ControlPlaneEndpoint", listenerName) + err := r.APIServerMux.RegisterResourceGroup(listenerName, resourceGroup) + if err != nil { + return ctrl.Result{}, err + } + found = true + break + } + if !found { + return ctrl.Result{}, errors.Errorf("unable to find a ControlPlaneEndpoint for host %s, port %d", cluster.Spec.ControlPlaneEndpoint.Host, cluster.Spec.ControlPlaneEndpoint.Port) + } + } // Check if there is a conditionsTracker in the resource group. 
// The conditionsTracker is an object stored in memory with the scope of storing conditions used for keeping diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go index bce242948e..8365f64b75 100644 --- a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go +++ b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go @@ -146,12 +146,30 @@ func Test_Reconcile_VSphereVM(t *testing.T) { err := inmemoryMgr.Start(ctx) g.Expect(err).ToNot(HaveOccurred()) - inmemoryMgr.AddResourceGroup(klog.KObj(cluster).String()) - inmemoryClient := inmemoryMgr.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + resourceGroupName := klog.KObj(cluster).String() + inmemoryMgr.AddResourceGroup(resourceGroupName) + inmemoryClient := inmemoryMgr.GetResourceGroup(resourceGroupName).GetClient() + + host := "127.0.0.1" + wcmux, err := inmemoryserver.NewWorkloadClustersMux(inmemoryMgr, host, inmemoryserver.CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: inmemoryserver.DefaultMinPort + 400, + MaxPort: inmemoryserver.DefaultMinPort + 499, + DebugPort: inmemoryserver.DefaultDebugPort + 4, + }) + g.Expect(err).ToNot(HaveOccurred()) + + listenerName := "foo/bar" + _, err = wcmux.InitWorkloadClusterListener(listenerName) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.RegisterResourceGroup(listenerName, resourceGroupName) + g.Expect(err).ToNot(HaveOccurred()) r := VSphereVMReconciler{ Client: crclient, InMemoryManager: inmemoryMgr, + APIServerMux: wcmux, } // Reconcile @@ -263,8 +281,9 @@ func Test_Reconcile_VSphereVM(t *testing.T) { err := inmemoryMgr.Start(ctx) g.Expect(err).ToNot(HaveOccurred()) - inmemoryMgr.AddResourceGroup(klog.KObj(cluster).String()) - inmemoryClient := inmemoryMgr.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + resourceGroupName := klog.KObj(cluster).String() + inmemoryMgr.AddResourceGroup(resourceGroupName) + inmemoryClient := inmemoryMgr.GetResourceGroup(resourceGroupName).GetClient() // Start an http server apiServerMux, err := inmemoryserver.NewWorkloadClustersMux(inmemoryMgr, "127.0.0.1", inmemoryserver.CustomPorts{ @@ -275,6 +294,13 @@ func Test_Reconcile_VSphereVM(t *testing.T) { }) g.Expect(err).ToNot(HaveOccurred()) + listenerName := "foo/bar" + _, err = apiServerMux.InitWorkloadClusterListener(listenerName) + g.Expect(err).ToNot(HaveOccurred()) + + err = apiServerMux.RegisterResourceGroup(listenerName, resourceGroupName) + g.Expect(err).ToNot(HaveOccurred()) + r := VSphereVMReconciler{ Client: crclient, InMemoryManager: inmemoryMgr, diff --git a/test/infrastructure/vcsim/main.go b/test/infrastructure/vcsim/main.go index 12cfdf33d1..612277b1dd 100644 --- a/test/infrastructure/vcsim/main.go +++ b/test/infrastructure/vcsim/main.go @@ -34,7 +34,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -52,7 +52,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller" ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" @@ -259,9 +258,9 @@ func 
main() { // Check for non-supervisor VSphereCluster and start controller if found gvr := infrav1.GroupVersion.WithResource(reflect.TypeOf(&infrav1.VSphereCluster{}).Elem().Name()) - nonSupervisorMode, err := isCRDDeployed(mgr, gvr) + govmomiMode, err := isCRDDeployed(mgr, gvr) if err != nil { - setupLog.Error(err, "unable to detect supervisor mode") + setupLog.Error(err, "unable to detect govmomi mode") os.Exit(1) } @@ -274,8 +273,8 @@ func main() { } // Continuing startup does not make sense without having managers added. - if !nonSupervisorMode && !supervisorMode { - err := errors.New("neither supervisor nor non-supervisor CRDs detected") + if !govmomiMode && !supervisorMode { + err := errors.New("neither supervisor nor govmomi CRDs detected") setupLog.Error(err, "CAPV CRDs are not deployed yet, restarting") os.Exit(1) } @@ -391,17 +390,7 @@ func concurrency(c int) controller.Options { func isCRDDeployed(mgr ctrlmgr.Manager, gvr schema.GroupVersionResource) (bool, error) { _, err := mgr.GetRESTMapper().KindFor(gvr) if err != nil { - var discoveryErr *apiutil.ErrResourceDiscoveryFailed - ok := errors.As(errors.Unwrap(err), &discoveryErr) - if !ok { - return false, err - } - discoveryErrs := *discoveryErr - gvrErr, ok := discoveryErrs[gvr.GroupVersion()] - if !ok { - return false, err - } - if apierrors.IsNotFound(gvrErr) { + if meta.IsNoMatchError(err) { return false, nil } return false, err
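// For reference, the patched detection helper reassembled from the hunks above
// (the success path is assumed from the surrounding code, which this diff does not
// show): meta.IsNoMatchError matches the no-match errors the RESTMapper returns
// when a CRD is not installed, so the manual unwrapping of
// apiutil.ErrResourceDiscoveryFailed is no longer needed.
func isCRDDeployed(mgr ctrlmgr.Manager, gvr schema.GroupVersionResource) (bool, error) {
	_, err := mgr.GetRESTMapper().KindFor(gvr)
	if err != nil {
		if meta.IsNoMatchError(err) {
			// The CRD backing gvr is not installed in the management cluster.
			return false, nil
		}
		// Anything else (e.g. a transient discovery failure) is a real error.
		return false, err
	}
	return true, nil
}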