diff --git a/Makefile b/Makefile index 94baf982f86c..e53dd857f55a 100644 --- a/Makefile +++ b/Makefile @@ -558,7 +558,7 @@ generate-doctoc: TRACE=$(TRACE) ./hack/generate-doctoc.sh .PHONY: generate-e2e-templates -generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.3 v0.4 v1.0 v1.5 v1.6 v1.7 main) ## Generate cluster templates for all versions +generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.3 v0.4 v1.0 v1.5 v1.6 v1.7 v1.8 main) ## Generate cluster templates for all versions DOCKER_TEMPLATES := test/e2e/data/infrastructure-docker INMEMORY_TEMPLATES := test/e2e/data/infrastructure-inmemory @@ -590,6 +590,11 @@ generate-e2e-templates-v1.7: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.7/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.7/cluster-template.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.7/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.7/cluster-template-topology.yaml +.PHONY: generate-e2e-templates-v1.8 +generate-e2e-templates-v1.8: $(KUSTOMIZE) + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.8/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.8/cluster-template.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.8/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.8/cluster-template-topology.yaml + .PHONY: generate-e2e-templates-main generate-e2e-templates-main: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template.yaml diff --git a/cmd/clusterctl/hack/create-local-repository.py b/cmd/clusterctl/hack/create-local-repository.py index 0c62a2285c9d..b391fa6ee515 100755 --- a/cmd/clusterctl/hack/create-local-repository.py +++ b/cmd/clusterctl/hack/create-local-repository.py @@ -54,36 +54,36 @@ providers = { 'cluster-api': { 'componentsFile': 'core-components.yaml', - 'nextVersion': 'v1.8.99', + 'nextVersion': 'v1.9.99', 'type': 'CoreProvider', }, 'bootstrap-kubeadm': { 'componentsFile': 'bootstrap-components.yaml', - 'nextVersion': 'v1.8.99', + 'nextVersion': 'v1.9.99', 'type': 'BootstrapProvider', 'configFolder': 'bootstrap/kubeadm/config/default', }, 'control-plane-kubeadm': { 'componentsFile': 'control-plane-components.yaml', - 'nextVersion': 'v1.8.99', + 'nextVersion': 'v1.9.99', 'type': 'ControlPlaneProvider', 'configFolder': 'controlplane/kubeadm/config/default', }, 'infrastructure-docker': { 'componentsFile': 'infrastructure-components-development.yaml', - 'nextVersion': 'v1.8.99', + 'nextVersion': 'v1.9.99', 'type': 'InfrastructureProvider', 'configFolder': 'test/infrastructure/docker/config/default', }, 'infrastructure-in-memory': { 'componentsFile': 'infrastructure-components-in-memory-development.yaml', - 'nextVersion': 'v1.8.99', + 'nextVersion': 'v1.9.99', 'type': 'InfrastructureProvider', 'configFolder': 'test/infrastructure/inmemory/config/default', }, 'runtime-extension-test': { 'componentsFile': 'runtime-extension-components-development.yaml', - 'nextVersion': 'v1.8.99', + 'nextVersion': 'v1.9.99', 'type': 'RuntimeExtensionProvider', 'configFolder': 'test/extension/config/default', }, diff --git a/docs/release/release-tasks.md b/docs/release/release-tasks.md index bfeddd4a1676..6a4de4ac74f1 100644 --- a/docs/release/release-tasks.md +++ b/docs/release/release-tasks.md @@ -97,10 +97,12 @@ is used for e.g. local development and e2e tests. 
We also modify tests so that they are testing the previous release. This comes down to changing occurrences of the old version to the new version, e.g. `v1.5` to `v1.6`: 1. Setup E2E tests for the new release: - 1. Goal is that we have clusterctl upgrade tests for the latest stable versions of each contract / for each supported branch. For `v1.6` this means: - * v1beta1: `v1.0`, `v1.4`, `v1.5` (will change with each new release) + 1. Goal is that we have clusterctl upgrade tests for all relevant upgrade cases. + 1. Modify the test specs in `test/e2e/clusterctl_upgrade_test.go`. Please note the comments above each test case (look for `This test should be changed during "prepare main branch"`). + Please note that both `InitWithKubernetesVersion` and `WorkloadKubernetesVersion` should be the highest management cluster version supported by the respective Cluster API version. + 2. Please ping maintainers after these changes are made for a first round of feedback before continuing with the steps below. 2. Update providers in `docker.yaml`: - 1. Add a new `v1.6.0` entry. + 1. Add a new `v1.6` entry. 2. Remove providers that are not used anymore in clusterctl upgrade tests. 3. Change `v1.5.99` to `v1.6.99`. 3. Adjust `metadata.yaml`'s: @@ -112,17 +114,13 @@ This comes down to changing occurrences of the old version to the new version, e 4. Adjust cluster templates in `test/e2e/data/infrastructure-docker`: 1. Create a new `v1.6` folder. It should be created based on the `main` folder and only contain the templates we use in the clusterctl upgrade tests (as of today `cluster-template` and `cluster-template-topology`). 2. Remove old folders that are not used anymore in clusterctl upgrade tests. - 5. Modify the test specs in `test/e2e/clusterctl_upgrade_test.go` (according to the versions we want to test described above). - Please note that both `InitWithKubernetesVersion` and `WorkloadKubernetesVersion` should be the highest mgmt cluster version supported by the respective Cluster API version. + 5. Add a new Makefile target (e.g. `generate-e2e-templates-v1.6`) and potentially remove the Makefile targets of versions that are not used anymore (if something was removed in 4.2). 2. Update `create-local-repository.py` and `tools/internal/tilt-prepare/main.go`: `v1.5.99` => `v1.6.99`. 3. Make sure all tests are green (also run `pull-cluster-api-e2e-full-main` and `pull-cluster-api-e2e-workload-upgrade-1-27-latest-main`). -4. Remove an unsupported release version of Cluster API from the Makefile target that generates e2e templates. For example, remove `v1.3` while working on `v1.6`. Prior art: -* 1.5 - https://github.com/kubernetes-sigs/cluster-api/pull/8430/files -* 1.6 - https://github.com/kubernetes-sigs/cluster-api/pull/9097/files -* 1.7 - https://github.com/kubernetes-sigs/cluster-api/pull/9799/files +* 1.9 - https://github.com/kubernetes-sigs/cluster-api/pull/11059 #### Create a new GitHub milestone for the next release diff --git a/hack/tools/internal/tilt-prepare/main.go b/hack/tools/internal/tilt-prepare/main.go index ae33188b1fa4..f5229199b473 100644 --- a/hack/tools/internal/tilt-prepare/main.go +++ b/hack/tools/internal/tilt-prepare/main.go @@ -71,7 +71,7 @@ const ( var ( // Defines the default version to be used for the provider CR if no version is specified in the tilt-provider.yaml|json file.
- defaultProviderVersion = "v1.8.99" + defaultProviderVersion = "v1.9.99" // This data struct mirrors a subset of info from the providers struct in the tilt file // which is containing "hard-coded" tilt-provider.yaml files for the providers managed in the Cluster API repository. diff --git a/metadata.yaml b/metadata.yaml index c692cde2d684..f569bbeeb080 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -6,6 +6,9 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 kind: Metadata releaseSeries: + - major: 1 + minor: 9 + contract: v1beta1 - major: 1 minor: 8 contract: v1beta1 diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index 93b5dbf1839d..81bff623c362 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -38,6 +38,7 @@ var ( providerDockerPrefix = "docker:v%s" ) +// Note: This test should not be changed during "prepare main branch". var _ = Describe("When testing clusterctl upgrades (v0.3=>v1.5=>current)", func() { // We are testing v0.3=>v1.5=>current to ensure that old entries with v1alpha3 in managed files do not cause issues // as described in https://github.com/kubernetes-sigs/cluster-api/issues/10051. @@ -114,6 +115,7 @@ var _ = Describe("When testing clusterctl upgrades (v0.3=>v1.5=>current)", func( }) }) +// Note: This test should not be changed during "prepare main branch". var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.6=>current)", func() { // We are testing v0.4=>v1.6=>current to ensure that old entries with v1alpha4 in managed files do not cause issues // as described in https://github.com/kubernetes-sigs/cluster-api/issues/10051. @@ -180,6 +182,7 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.6=>current)", func( }) }) +// Note: This test should not be changed during "prepare main branch". var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() { // Get v1.0 latest stable release version := "1.0" @@ -215,9 +218,10 @@ var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() { }) }) -var _ = Describe("When testing clusterctl upgrades (v1.6=>current)", func() { - // Get v1.6 latest stable release - version := "1.6" +// Note: This test should be changed during "prepare main branch", it should test n-2 => current. +var _ = Describe("When testing clusterctl upgrades (v1.7=>current)", func() { + // Get v1.7 latest stable release + version := "1.7" stableRelease, err := GetStableReleaseOfMinor(ctx, version) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", version) ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { @@ -237,8 +241,8 @@ var _ = Describe("When testing clusterctl upgrades (v1.6=>current)", func() { InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, stableRelease)}, InitWithProvidersContract: "v1beta1", // Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version. 
- InitWithKubernetesVersion: "v1.29.0", - WorkloadKubernetesVersion: "v1.29.0", + InitWithKubernetesVersion: "v1.30.0", + WorkloadKubernetesVersion: "v1.30.0", MgmtFlavor: "topology", WorkloadFlavor: "", UseKindForManagementCluster: true, @@ -246,9 +250,10 @@ var _ = Describe("When testing clusterctl upgrades (v1.6=>current)", func() { }) }) -var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.6=>current) [ClusterClass]", func() { - // Get v1.6 latest stable release - version := "1.6" +// Note: This test should be changed during "prepare main branch", it should test n-2 => current. +var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.7=>current) [ClusterClass]", func() { + // Get v1.7 latest stable release + version := "1.7" stableRelease, err := GetStableReleaseOfMinor(ctx, version) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", version) ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { @@ -268,8 +273,8 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.6=>cur InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, stableRelease)}, InitWithProvidersContract: "v1beta1", // Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version. - InitWithKubernetesVersion: "v1.29.0", - WorkloadKubernetesVersion: "v1.29.0", + InitWithKubernetesVersion: "v1.30.0", + WorkloadKubernetesVersion: "v1.30.0", MgmtFlavor: "topology", WorkloadFlavor: "topology", UseKindForManagementCluster: true, @@ -277,9 +282,10 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.6=>cur }) }) -var _ = Describe("When testing clusterctl upgrades (v1.7=>current)", func() { - // Get v1.7 latest stable release - version := "1.7" +// Note: This test should be changed during "prepare main branch", it should test n-1 => current. +var _ = Describe("When testing clusterctl upgrades (v1.8=>current)", func() { + // Get v1.8 latest stable release + version := "1.8" stableRelease, err := GetStableReleaseOfMinor(ctx, version) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", version) ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { @@ -293,8 +299,8 @@ var _ = Describe("When testing clusterctl upgrades (v1.7=>current)", func() { InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease), InitWithProvidersContract: "v1beta1", // Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version. - InitWithKubernetesVersion: "v1.30.0", - WorkloadKubernetesVersion: "v1.30.0", + InitWithKubernetesVersion: "v1.31.0", + WorkloadKubernetesVersion: "v1.31.0", MgmtFlavor: "topology", WorkloadFlavor: "", UseKindForManagementCluster: true, @@ -302,9 +308,10 @@ var _ = Describe("When testing clusterctl upgrades (v1.7=>current)", func() { }) }) -var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.7=>current) [ClusterClass]", func() { - // Get v1.7 latest stable release - version := "1.7" +// Note: This test should be changed during "prepare main branch", it should test n-1 => current. 
+var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.8=>current) [ClusterClass]", func() { + // Get v1.8 latest stable release + version := "1.8" stableRelease, err := GetStableReleaseOfMinor(ctx, version) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", version) ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { @@ -318,8 +325,8 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.7=>cur InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease), InitWithProvidersContract: "v1beta1", // Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version. - InitWithKubernetesVersion: "v1.30.0", - WorkloadKubernetesVersion: "v1.30.0", + InitWithKubernetesVersion: "v1.31.0", + WorkloadKubernetesVersion: "v1.31.0", MgmtFlavor: "topology", WorkloadFlavor: "topology", UseKindForManagementCluster: true, @@ -327,9 +334,10 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.7=>cur }) }) -var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.7=>current) on K8S latest ci mgmt cluster [ClusterClass]", func() { - // Get v1.7 latest stable release - version := "1.7" +// Note: This test should be changed during "prepare main branch", it should test n-1 => current. +var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.8=>current) on K8S latest ci mgmt cluster [ClusterClass]", func() { + // Get v1.8 latest stable release + version := "1.8" stableRelease, err := GetStableReleaseOfMinor(ctx, version) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", version) ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { @@ -347,7 +355,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.7=>cur // Note: InitWithKubernetesVersion should be the latest of the next supported kubernetes version by the target Cluster API version. // Note: WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version. InitWithKubernetesVersion: initKubernetesVersion, - WorkloadKubernetesVersion: "v1.30.0", + WorkloadKubernetesVersion: "v1.31.0", MgmtFlavor: "topology", WorkloadFlavor: "topology", UseKindForManagementCluster: true, diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml index 0a29acdf0390..8e8aa82f00ee 100644 --- a/test/e2e/config/docker.yaml +++ b/test/e2e/config/docker.yaml @@ -35,7 +35,7 @@ providers: - name: cluster-api type: CoreProvider versions: - - name: "{go://sigs.k8s.io/cluster-api@v0.3}" # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v0.3}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.3}/core-components.yaml" type: "url" contract: v1alpha3 @@ -44,7 +44,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v0.3/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v0.4}" # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. 
+ - name: "{go://sigs.k8s.io/cluster-api@v0.4}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.4}/core-components.yaml" type: "url" contract: v1alpha4 @@ -53,7 +53,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v0.4/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.0}" # supported release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.0}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.0}/core-components.yaml" type: "url" contract: v1beta1 @@ -62,7 +62,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.0/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.5}" # supported release in the v1beta1 series; this is used for v1alpha3 --> v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.5}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.5}/core-components.yaml" type: "url" contract: v1beta1 @@ -71,7 +71,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.5/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.6}" # supported release in the v1beta1 series; this is used for v1alpha4 --> v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.6}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.6}/core-components.yaml" type: "url" contract: v1beta1 @@ -80,16 +80,25 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.6/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.7}" # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.7}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.7}/core-components.yaml" type: "url" contract: v1beta1 replacements: - - old: --metrics-addr=127.0.0.1:8080 - new: --metrics-addr=:8080 + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.7/metadata.yaml" - - name: v1.8.99 # next; use manifest from source files + - name: "{go://sigs.k8s.io/cluster-api@v1.8}" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.8}/core-components.yaml" + type: "url" + contract: v1beta1 + replacements: + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 + files: + - sourcePath: "../data/shared/v1.8/metadata.yaml" + - name: v1.9.99 # next; use manifest from source files value: ../../../config/default replacements: - old: "--leader-elect" @@ -100,7 +109,7 @@ providers: - name: kubeadm type: BootstrapProvider versions: - - name: "{go://sigs.k8s.io/cluster-api@v0.3}" # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. 
+ - name: "{go://sigs.k8s.io/cluster-api@v0.3}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.3}/bootstrap-components.yaml" type: "url" contract: v1alpha3 @@ -109,7 +118,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v0.3/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v0.4}" # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v0.4}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.4}/bootstrap-components.yaml" type: "url" contract: v1alpha4 @@ -118,7 +127,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v0.4/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.0}" # supported release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.0}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.0}/bootstrap-components.yaml" type: "url" contract: v1beta1 @@ -127,7 +136,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.0/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.5}" # supported release in the v1beta1 series; this is used for v1alpha3 --> v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.5}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.5}/bootstrap-components.yaml" type: "url" contract: v1beta1 @@ -136,7 +145,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.5/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.6}" # supported release in the v1beta1 series; this is used for v1alpha4 --> v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.6}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.6}/bootstrap-components.yaml" type: "url" contract: v1beta1 @@ -145,7 +154,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.6/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.7}" # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. 
+ - name: "{go://sigs.k8s.io/cluster-api@v1.7}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.7}/bootstrap-components.yaml" type: "url" contract: v1beta1 @@ -154,7 +163,16 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.7/metadata.yaml" - - name: v1.8.99 # next; use manifest from source files + - name: "{go://sigs.k8s.io/cluster-api@v1.8}" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.8}/bootstrap-components.yaml" + type: "url" + contract: v1beta1 + replacements: + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 + files: + - sourcePath: "../data/shared/v1.8/metadata.yaml" + - name: v1.9.99 # next; use manifest from source files value: ../../../bootstrap/kubeadm/config/default replacements: - old: "--leader-elect" @@ -165,7 +183,7 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: - - name: "{go://sigs.k8s.io/cluster-api@v0.3}" # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v0.3}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.3}/control-plane-components.yaml" type: "url" contract: v1alpha3 @@ -174,7 +192,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v0.3/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v0.4}" # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v0.4}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.4}/control-plane-components.yaml" type: "url" contract: v1alpha4 @@ -183,7 +201,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v0.4/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.0}" # supported release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.0}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.0}/control-plane-components.yaml" type: "url" contract: v1beta1 @@ -192,7 +210,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.0/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.5}" # supported release in the v1beta1 series; this is used for v1alpha3 --> v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.5}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.5}/control-plane-components.yaml" type: "url" contract: v1beta1 @@ -201,7 +219,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.5/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.6}" # supported release in the v1beta1 series; this is used for v1alpha4 --> v1beta1 --> main clusterctl upgrades test only. 
+ - name: "{go://sigs.k8s.io/cluster-api@v1.6}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.6}/control-plane-components.yaml" type: "url" contract: v1beta1 @@ -210,7 +228,7 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.6/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.7}" # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.7}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.7}/control-plane-components.yaml" type: "url" contract: v1beta1 @@ -219,7 +237,16 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1.7/metadata.yaml" - - name: v1.8.99 # next; use manifest from source files + - name: "{go://sigs.k8s.io/cluster-api@v1.8}" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.8}/control-plane-components.yaml" + type: "url" + contract: v1beta1 + replacements: + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 + files: + - sourcePath: "../data/shared/v1.8/metadata.yaml" + - name: v1.9.99 # next; use manifest from source files value: ../../../controlplane/kubeadm/config/default replacements: - old: "--leader-elect" @@ -230,7 +257,7 @@ providers: - name: docker type: InfrastructureProvider versions: - - name: "{go://sigs.k8s.io/cluster-api@v0.3}" # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v0.3}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.3}/infrastructure-components-development.yaml" type: "url" contract: v1alpha3 @@ -240,7 +267,7 @@ providers: files: - sourcePath: "../data/shared/v0.3/metadata.yaml" - sourcePath: "../data/infrastructure-docker/v0.3/cluster-template.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v0.4}" # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v0.4}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.4}/infrastructure-components-development.yaml" type: "url" contract: v1alpha4 @@ -250,7 +277,7 @@ providers: files: - sourcePath: "../data/shared/v0.4/metadata.yaml" - sourcePath: "../data/infrastructure-docker/v0.4/cluster-template.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.0}" # supported release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.0}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.0}/infrastructure-components-development.yaml" type: "url" contract: v1beta1 @@ -260,7 +287,7 @@ providers: files: - sourcePath: "../data/shared/v1.0/metadata.yaml" - sourcePath: "../data/infrastructure-docker/v1.0/cluster-template.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.5}" # supported release in the v1beta1 series; this is used for v1alpha3 --> v1beta1 --> main clusterctl upgrades test only. 
+ - name: "{go://sigs.k8s.io/cluster-api@v1.5}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.5}/infrastructure-components-development.yaml" type: "url" contract: v1beta1 @@ -272,7 +299,7 @@ providers: - sourcePath: "../data/infrastructure-docker/v1.5/cluster-template.yaml" - sourcePath: "../data/infrastructure-docker/v1.5/cluster-template-topology.yaml" - sourcePath: "../data/infrastructure-docker/v1.5/clusterclass-quick-start.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.6}" # supported release in the v1beta1 series; this is used for v1alpha4 --> v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.6}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.6}/infrastructure-components-development.yaml" type: "url" contract: v1beta1 @@ -284,7 +311,7 @@ providers: - sourcePath: "../data/infrastructure-docker/v1.6/cluster-template.yaml" - sourcePath: "../data/infrastructure-docker/v1.6/cluster-template-topology.yaml" - sourcePath: "../data/infrastructure-docker/v1.6/clusterclass-quick-start.yaml" - - name: "{go://sigs.k8s.io/cluster-api@v1.7}" # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + - name: "{go://sigs.k8s.io/cluster-api@v1.7}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.7}/infrastructure-components-development.yaml" type: "url" contract: v1beta1 @@ -296,7 +323,19 @@ providers: - sourcePath: "../data/infrastructure-docker/v1.7/cluster-template.yaml" - sourcePath: "../data/infrastructure-docker/v1.7/cluster-template-topology.yaml" - sourcePath: "../data/infrastructure-docker/v1.7/clusterclass-quick-start.yaml" - - name: v1.8.99 # next; use manifest from source files + - name: "{go://sigs.k8s.io/cluster-api@v1.8}" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.8}/infrastructure-components-development.yaml" + type: "url" + contract: v1beta1 + replacements: + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 + files: + - sourcePath: "../data/shared/v1.8/metadata.yaml" + - sourcePath: "../data/infrastructure-docker/v1.8/cluster-template.yaml" + - sourcePath: "../data/infrastructure-docker/v1.8/cluster-template-topology.yaml" + - sourcePath: "../data/infrastructure-docker/v1.8/clusterclass-quick-start.yaml" + - name: v1.9.99 # next; use manifest from source files value: ../../../test/infrastructure/docker/config/default replacements: - old: "--leader-elect" @@ -326,7 +365,7 @@ providers: - name: in-memory type: InfrastructureProvider versions: - - name: v1.8.99 # next; use manifest from source files + - name: v1.9.99 # next; use manifest from source files value: ../../../test/infrastructure/inmemory/config/default replacements: - old: "--leader-elect" @@ -340,7 +379,7 @@ providers: - name: test-extension type: RuntimeExtensionProvider versions: - - name: v1.8.99 # next; use manifest from source files + - name: v1.9.99 # next; use manifest from source files value: ../../../test/extension/config/default replacements: - old: "--leader-elect" diff --git a/test/e2e/data/infrastructure-docker/v1.8/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/v1.8/bases/cluster-with-kcp.yaml new file mode 100644 index 000000000000..f6e60681e1d5 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.8/bases/cluster-with-kcp.yaml @@ 
-0,0 +1,89 @@ +--- +# DockerCluster object referenced by the Cluster object +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + name: '${CLUSTER_NAME}' +spec: + failureDomains: + fd1: + controlPlane: true + fd2: + controlPlane: true + fd3: + controlPlane: true + fd4: + controlPlane: false + fd5: + controlPlane: false + fd6: + controlPlane: false + fd7: + controlPlane: false + fd8: + controlPlane: false +--- +# Cluster object with +# - Reference to the KubeadmControlPlane object +# - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + labels: + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + services: + cidrBlocks: ['${DOCKER_SERVICE_CIDRS}'] + pods: + cidrBlocks: ['${DOCKER_POD_CIDRS}'] + serviceDomain: '${DOCKER_SERVICE_DOMAIN}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: '${CLUSTER_NAME}' + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +# DockerMachineTemplate object referenced by the KubeadmControlPlane object +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" + # The DOCKER_PRELOAD_IMAGES variable gets set in self-hosted E2E tests to the list of images of the E2E configuration. + preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} +--- +# KubeadmControlPlane referenced by the Cluster object with +# - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test. +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" + labels: + kcp-adoption.step2: "" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. + certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] + initConfiguration: + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + joinConfiguration: + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
+ version: "${KUBERNETES_VERSION}" diff --git a/test/e2e/data/infrastructure-docker/v1.8/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/v1.8/bases/cluster-with-topology.yaml new file mode 100644 index 000000000000..1fa907f3aab9 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.8/bases/cluster-with-topology.yaml @@ -0,0 +1,75 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: default + labels: + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + services: + cidrBlocks: ['${DOCKER_SERVICE_CIDRS}'] + pods: + cidrBlocks: ['${DOCKER_POD_CIDRS}'] + serviceDomain: '${DOCKER_SERVICE_DOMAIN}' + topology: + class: "quick-start" + version: "${KUBERNETES_VERSION}" + controlPlane: + metadata: + labels: + Cluster.topology.controlPlane.label: "Cluster.topology.controlPlane.labelValue" + # Note: this label is propagated to Nodes. + Cluster.topology.controlPlane.label.node.cluster.x-k8s.io: "Cluster.topology.controlPlane.nodeLabelValue" + annotations: + Cluster.topology.controlPlane.annotation: "Cluster.topology.controlPlane.annotationValue" + nodeDeletionTimeout: "30s" + nodeVolumeDetachTimeout: "5m" + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + workers: + machineDeployments: + - class: "default-worker" + name: "md-0" + metadata: + labels: + Cluster.topology.machineDeployment.label: "Cluster.topology.machineDeployment.labelValue" + # Note: this label is propagated to Nodes. + Cluster.topology.machineDeployment.label.node.cluster.x-k8s.io: "Cluster.topology.machineDeployment.nodeLabelValue" + annotations: + Cluster.topology.machineDeployment.annotation: "Cluster.topology.machineDeployment.annotationValue" + nodeDeletionTimeout: "30s" + nodeVolumeDetachTimeout: "5m" + minReadySeconds: 5 + replicas: ${WORKER_MACHINE_COUNT} + failureDomain: fd4 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: "20%" + maxUnavailable: 0 + machinePools: + - class: "default-worker" + name: "mp-0" + metadata: + labels: + Cluster.topology.machinePool.label: "Cluster.topology.machinePool.labelValue" + # Note: this label is propagated to Nodes. + Cluster.topology.machinePool.label.node.cluster.x-k8s.io: "Cluster.topology.machinePool.nodeLabelValue" + annotations: + Cluster.topology.machinePool.annotation: "Cluster.topology.machinePool.annotationValue" + nodeDeletionTimeout: "30s" + nodeVolumeDetachTimeout: "5m" + minReadySeconds: 5 + replicas: ${WORKER_MACHINE_COUNT} + failureDomains: + - fd4 + variables: + # We set an empty value to use the default tag kubeadm init is using. + - name: etcdImageTag + value: "" + # We set an empty value to use the default tag kubeadm init is using. + - name: coreDNSImageTag + value: "" + - name: preLoadImages + # The DOCKER_PRELOAD_IMAGES variable gets set in self-hosted E2E tests to the list of images of the E2E configuration. 
+ value: ${DOCKER_PRELOAD_IMAGES:-[]} diff --git a/test/e2e/data/infrastructure-docker/v1.8/bases/crs.yaml b/test/e2e/data/infrastructure-docker/v1.8/bases/crs.yaml new file mode 100644 index 000000000000..b1b61237dc62 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.8/bases/crs.yaml @@ -0,0 +1,24 @@ +--- +# ConfigMap object referenced by the ClusterResourceSet object and with +# the CNI resource defined in the test config file +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-0" +data: ${CNI_RESOURCES} +binaryData: +--- +# ClusterResourceSet object with +# a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0 +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-0" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-0" + resources: + - name: "cni-${CLUSTER_NAME}-crs-0" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-docker/v1.8/bases/md.yaml b/test/e2e/data/infrastructure-docker/v1.8/bases/md.yaml new file mode 100644 index 000000000000..5d42a2cf5e6a --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.8/bases/md.yaml @@ -0,0 +1,51 @@ +--- +# DockerMachineTemplate referenced by the MachineDeployment and with +# - extraMounts for the docker sock, thus allowing self-hosting test +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" + # The DOCKER_PRELOAD_IMAGES variable gets set in self-hosted E2E tests to the list of images of the E2E configuration. + preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} +--- +# KubeadmConfigTemplate referenced by the MachineDeployment +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
+--- +# MachineDeployment object +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + failureDomain: fd4 diff --git a/test/e2e/data/infrastructure-docker/v1.8/cluster-template-topology/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1.8/cluster-template-topology/kustomization.yaml new file mode 100644 index 000000000000..44c449548234 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.8/cluster-template-topology/kustomization.yaml @@ -0,0 +1,3 @@ +resources: + - ../bases/cluster-with-topology.yaml + - ../bases/crs.yaml diff --git a/test/e2e/data/infrastructure-docker/v1.8/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1.8/cluster-template/kustomization.yaml new file mode 100644 index 000000000000..825df00723f8 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.8/cluster-template/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- ../bases/cluster-with-kcp.yaml +- ../bases/md.yaml +- ../bases/crs.yaml diff --git a/test/e2e/data/infrastructure-docker/v1.8/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/v1.8/clusterclass-quick-start.yaml new file mode 100644 index 000000000000..ced2d8a77f5b --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1.8/clusterclass-quick-start.yaml @@ -0,0 +1,667 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: quick-start +spec: + controlPlane: + metadata: + labels: + ClusterClass.controlPlane.label: "ClusterClass.controlPlane.labelValue" + annotations: + ClusterClass.controlPlane.annotation: "ClusterClass.controlPlane.annotationValue" + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: quick-start-control-plane + machineInfrastructure: + ref: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: quick-start-control-plane + machineHealthCheck: + maxUnhealthy: 100% + unhealthyConditions: + - type: e2e.remediation.condition + status: "False" + timeout: 20s + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerClusterTemplate + name: quick-start-cluster + workers: + machineDeployments: + - class: default-worker + template: + metadata: + labels: + ClusterClass.machineDeployment.label: "ClusterClass.machineDeployment.labelValue" + annotations: + ClusterClass.machineDeployment.annotation: "ClusterClass.machineDeployment.annotationValue" + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: quick-start-md-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: quick-start-default-worker-machinetemplate + machineHealthCheck: + maxUnhealthy: 100% + # We are intentionally not setting the 'unhealthyConditions' here to test that the field is optional. 
+ machinePools: + - class: default-worker + template: + metadata: + labels: + ClusterClass.machinePool.label: "ClusterClass.machinePool.labelValue" + annotations: + ClusterClass.machinePool.annotation: "ClusterClass.machinePool.annotationValue" + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: quick-start-mp-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachinePoolTemplate + name: quick-start-default-worker-machinepooltemplate + variables: + - name: lbImageRepository + required: true + schema: + openAPIV3Schema: + type: string + default: kindest + - name: etcdImageTag + required: true + # This metadata has just been added to verify that we can set metadata. + metadata: + labels: + testLabelKey: testLabelValue + annotations: + testAnnotationKey: testAnnotationValue + schema: + openAPIV3Schema: + type: string + default: "" + example: "3.5.3-0" + description: "etcdImageTag sets the tag for the etcd image." + # This metadata has just been added to verify that we can set metadata. + x-metadata: + labels: + testLabelKey: testXLabelValue + annotations: + testAnnotationKey: testXAnnotationValue + - name: coreDNSImageTag + required: true + schema: + openAPIV3Schema: + type: string + default: "" + example: "v1.8.5" + description: "coreDNSImageTag sets the tag for the coreDNS image." + - name: kubeadmControlPlaneMaxSurge + required: false + schema: + openAPIV3Schema: + type: string + default: "" + example: "0" + description: "kubeadmControlPlaneMaxSurge is the maximum number of control planes that can be scheduled above or under the desired number of control plane machines." + x-kubernetes-validations: + - rule: "self == \"\" || self != \"\"" + messageExpression: "'just a test expression, got %s'.format([self])" + - name: preLoadImages + required: false + schema: + openAPIV3Schema: + default: [] + type: array + items: + type: string + # This metadata has just been added to verify that we can set metadata. + x-metadata: + labels: + testLabelKey: testXLabelValue + annotations: + testAnnotationKey: testXAnnotationValue + description: "preLoadImages sets the images for the docker machines to preload." + - name: controlPlaneTaint + required: false + schema: + openAPIV3Schema: + type: boolean + default: true + - name: externalCloudProvider + required: false + schema: + openAPIV3Schema: + type: boolean + default: false + - name: ipv6Primary + required: false + schema: + openAPIV3Schema: + type: boolean + default: false + - name: kubeControlPlaneLogLevel + required: false + schema: + openAPIV3Schema: + type: string + description: "Log level for kube-apiserver, kube-scheduler and kube-controller-manager" + example: "2" + - name: kubeletLogLevel + required: false + schema: + openAPIV3Schema: + type: string + description: "Log level for kubelets on control plane and worker nodes" + example: "2" + patches: + - name: lbImageRepository + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerClusterTemplate + matchResources: + infrastructureCluster: true + jsonPatches: + - op: add + path: "/spec/template/spec/loadBalancer" + valueFrom: + template: | + imageRepository: {{ .lbImageRepository }} + - name: etcdImageTag + description: "Sets tag to use for the etcd image in the KubeadmControlPlane." 
+ definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/etcd" + valueFrom: + template: | + local: + imageTag: {{ .etcdImageTag }} + - name: coreDNSImageTag + description: "Sets tag to use for the etcd image in the KubeadmControlPlane." + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/dns" + valueFrom: + template: | + imageTag: {{ .coreDNSImageTag }} + - name: customImage + description: "Sets the container image that is used for running dockerMachines for the controlPlane and default-worker machineDeployments." + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/customImage" + valueFrom: + template: | + kindest/node:{{ .builtin.machineDeployment.version | replace "+" "_" }} + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/customImage" + valueFrom: + template: | + kindest/node:{{ .builtin.controlPlane.version | replace "+" "_" }} + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachinePoolTemplate + matchResources: + machinePoolClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/template/customImage" + valueFrom: + template: | + kindest/node:{{ .builtin.machinePool.version | replace "+" "_" }} + - name: preloadImages + description: | + Sets the container images to preload to the node that is used for running dockerMachines. + This is especially required for self-hosted e2e tests to ensure the required controller images to be available + and reduce load to public registries. + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + controlPlane: true + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/preLoadImages" + valueFrom: + variable: preLoadImages + - name: preloadImagesMP + description: | + Sets the container images to preload to the node that is used for running dockerMachines. + This is especially required for self-hosted e2e tests to ensure the required controller images to be available + and reduce load to public registries. + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachinePoolTemplate + matchResources: + machinePoolClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/template/preLoadImages" + valueFrom: + variable: preLoadImages + - name: kubeadmControlPlaneMaxSurge + description: "Sets the maxSurge value used for rolloutStrategy in the KubeadmControlPlane." 
+ enabledIf: '{{ ne .kubeadmControlPlaneMaxSurge "" }}' + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/rolloutStrategy/rollingUpdate/maxSurge + valueFrom: + template: "{{ .kubeadmControlPlaneMaxSurge }}" + - name: controlPlaneTaint + enabledIf: "{{ not .controlPlaneTaint }}" + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/taints" + value: [] + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/taints" + value: [] + - name: controlPlaneExternalCloudProvider + enabledIf: "{{ .externalCloudProvider }}" + description: "Configures kubelet to run with an external cloud provider for control plane nodes." + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" + value: "external" + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" + value: "external" + - name: machineDeploymentExternalCloudProvider + enabledIf: "{{ .externalCloudProvider }}" + description: "Configures kubelet to run with an external cloud provider for machineDeployment nodes." + definitions: + - selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - '*-worker' + jsonPatches: + - op: add + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" + value: "external" + - selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machinePoolClass: + names: + - '*-worker' + jsonPatches: + - op: add + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" + value: "external" + - name: localEndpointIPv6 + enabledIf: "{{ .ipv6Primary }}" + description: "Configures KCP to use IPv6 for its localAPIEndpoint." + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/localAPIEndpoint" + value: + advertiseAddress: '::' + - name: podSecurityStandard + description: "Adds an admission configuration for PodSecurity to the kube-apiserver." 
+    definitions:
+    - selector:
+        apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+        kind: KubeadmControlPlaneTemplate
+        matchResources:
+          controlPlane: true
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/admission-control-config-file"
+        value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml"
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraVolumes"
+        value:
+        - name: admission-pss
+          hostPath: /etc/kubernetes/kube-apiserver-admission-pss.yaml
+          mountPath: /etc/kubernetes/kube-apiserver-admission-pss.yaml
+          readOnly: true
+          pathType: "File"
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/files"
+        valueFrom:
+          template: |
+            - content: |
+                apiVersion: apiserver.config.k8s.io/v1
+                kind: AdmissionConfiguration
+                plugins:
+                - name: PodSecurity
+                  configuration:
+                    apiVersion: pod-security.admission.config.k8s.io/v1{{ if semverCompare "< v1.25" .builtin.controlPlane.version }}beta1{{ end }}
+                    kind: PodSecurityConfiguration
+                    defaults:
+                      enforce: "baseline"
+                      enforce-version: "latest"
+                      audit: "baseline"
+                      audit-version: "latest"
+                      warn: "baseline"
+                      warn-version: "latest"
+                    exemptions:
+                      usernames: []
+                      runtimeClasses: []
+                      namespaces: [kube-system]
+              path: /etc/kubernetes/kube-apiserver-admission-pss.yaml
+    enabledIf: '{{ semverCompare ">= v1.24" .builtin.controlPlane.version }}'
+  - name: controlPlaneLogLevel
+    enabledIf: "{{ if .kubeControlPlaneLogLevel }}true{{end}}"
+    description: "Configures control plane components and kubelet to run at the log level specified in the variable `kubeControlPlaneLogLevel`."
+    definitions:
+    - selector:
+        apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+        kind: KubeadmControlPlaneTemplate
+        matchResources:
+          controlPlane: true
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/v"
+        valueFrom:
+          variable: kubeControlPlaneLogLevel
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/v"
+        valueFrom:
+          variable: kubeControlPlaneLogLevel
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/v"
+        valueFrom:
+          variable: kubeControlPlaneLogLevel
+  - name: controlPlaneKubeletLogLevel
+    enabledIf: "{{ if .kubeletLogLevel }}true{{end}}"
+    description: "Configures control plane kubelets to log at the level set in the variable `kubeletLogLevel`."
+    definitions:
+    - selector:
+        apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+        kind: KubeadmControlPlaneTemplate
+        matchResources:
+          controlPlane: true
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v"
+        valueFrom:
+          variable: kubeletLogLevel
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/v"
+        valueFrom:
+          variable: kubeletLogLevel
+  - name: workerKubeletLogLevel
+    enabledIf: "{{ if .kubeletLogLevel }}true{{end}}"
+    description: "Configures worker kubelets to log at the level set in the variable `kubeletLogLevel`."
+    definitions:
+    - selector:
+        apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+        kind: KubeadmConfigTemplate
+        matchResources:
+          machineDeploymentClass:
+            names:
+            - '*-worker'
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v"
+        valueFrom:
+          variable: kubeletLogLevel
+    - selector:
+        apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+        kind: KubeadmConfigTemplate
+        matchResources:
+          machinePoolClass:
+            names:
+            - '*-worker'
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v"
+        valueFrom:
+          variable: kubeletLogLevel
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerClusterTemplate
+metadata:
+  name: quick-start-cluster
+  labels:
+    InfrastructureClusterTemplate.label: "InfrastructureClusterTemplate.labelValue"
+  annotations:
+    InfrastructureClusterTemplate.annotation: "InfrastructureClusterTemplate.annotationValue"
+spec:
+  template:
+    metadata:
+      labels:
+        InfrastructureClusterTemplate.template.label: "InfrastructureClusterTemplate.template.labelValue"
+      annotations:
+        InfrastructureClusterTemplate.template.annotation: "InfrastructureClusterTemplate.template.annotationValue"
+    spec:
+      failureDomains:
+        fd1:
+          controlPlane: true
+        fd2:
+          controlPlane: true
+        fd3:
+          controlPlane: true
+        fd4:
+          controlPlane: false
+        fd5:
+          controlPlane: false
+        fd6:
+          controlPlane: false
+        fd7:
+          controlPlane: false
+        fd8:
+          controlPlane: false
+---
+kind: KubeadmControlPlaneTemplate
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+  name: quick-start-control-plane
+  labels:
+    ControlPlaneTemplate.label: "ControlPlaneTemplate.labelValue"
+  annotations:
+    ControlPlaneTemplate.annotation: "ControlPlaneTemplate.annotationValue"
+spec:
+  template:
+    metadata:
+      labels:
+        ControlPlaneTemplate.template.label: "ControlPlaneTemplate.template.labelValue"
+      annotations:
+        ControlPlaneTemplate.template.annotation: "ControlPlaneTemplate.template.annotationValue"
+    spec:
+      rolloutBefore:
+        certificatesExpiryDays: 21
+      machineTemplate:
+        metadata:
+          labels:
+            ControlPlaneTemplate.machineTemplate.label: "ControlPlaneTemplate.machineTemplate.labelValue"
+          annotations:
+            ControlPlaneTemplate.machineTemplate.annotation: "ControlPlaneTemplate.machineTemplate.annotationValue"
+        nodeDrainTimeout: 1s
+      kubeadmConfigSpec:
+        clusterConfiguration:
+          # extraArgs must be non-empty for control plane components to enable patches from ClusterClass to work.
+          controllerManager:
+            extraArgs:
+              v: "0"
+          scheduler:
+            extraArgs:
+              v: "0"
+          apiServer:
+            extraArgs:
+              v: "0"
+            # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied.
+            certSANs: [localhost, host.docker.internal, "::", "::1", "127.0.0.1", "0.0.0.0"]
+        initConfiguration:
+          nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use.
+            kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work
+              eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+        joinConfiguration:
+          nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use.
+            kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work
+              eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: quick-start-control-plane
+  labels:
+    InfraMachineTemplate.controlPlane.label: "InfraMachineTemplate.controlPlane.labelValue"
+  annotations:
+    InfraMachineTemplate.controlPlane.annotation: "InfraMachineTemplate.controlPlane.annotationValue"
+spec:
+  template:
+    metadata:
+      labels:
+        InfraMachineTemplate.controlPlane.template.label: "InfraMachineTemplate.controlPlane.template.labelValue"
+      annotations:
+        InfraMachineTemplate.controlPlane.template.annotation: "InfraMachineTemplate.controlPlane.template.annotationValue"
+    spec:
+      extraMounts:
+      - containerPath: "/var/run/docker.sock"
+        hostPath: "/var/run/docker.sock"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: quick-start-default-worker-machinetemplate
+  labels:
+    InfraMachineTemplate.machineDeployment.label: "InfraMachineTemplate.machineDeployment.labelValue"
+  annotations:
+    InfraMachineTemplate.machineDeployment.annotation: "InfraMachineTemplate.machineDeployment.annotationValue"
+spec:
+  template:
+    metadata:
+      labels:
+        InfraMachineTemplate.machineDeployment.template.label: "InfraMachineTemplate.machineDeployment.template.labelValue"
+      annotations:
+        InfraMachineTemplate.machineDeployment.template.annotation: "InfraMachineTemplate.machineDeployment.template.annotationValue"
+    spec:
+      extraMounts:
+      - containerPath: "/var/run/docker.sock"
+        hostPath: "/var/run/docker.sock"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachinePoolTemplate
+metadata:
+  name: quick-start-default-worker-machinepooltemplate
+  labels:
+    InfraMachinePoolTemplate.machinePool.label: "InfraMachinePoolTemplate.machinePool.labelValue"
+  annotations:
+    InfraMachinePoolTemplate.machinePool.annotation: "InfraMachinePoolTemplate.machinePool.annotationValue"
+spec:
+  template:
+    metadata:
+      labels:
+        InfraMachinePoolTemplate.machinePool.template.label: "InfraMachinePoolTemplate.machinePool.template.labelValue"
+      annotations:
+        InfraMachinePoolTemplate.machinePool.template.annotation: "InfraMachinePoolTemplate.machinePool.template.annotationValue"
+    spec:
+      template:
+        extraMounts:
+        - containerPath: "/var/run/docker.sock"
+          hostPath: "/var/run/docker.sock"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: quick-start-md-default-worker-bootstraptemplate
+  labels:
+    BootstrapConfigTemplate.machineDeployment.label: "BootstrapConfigTemplate.machineDeployment.labelValue"
+  annotations:
+    BootstrapConfigTemplate.machineDeployment.annotation: "BootstrapConfigTemplate.machineDeployment.annotationValue"
+spec:
+  template:
+    metadata:
+      labels:
+        BootstrapConfigTemplate.machineDeployment.template.label: "BootstrapConfigTemplate.machineDeployment.template.labelValue"
+      annotations:
+        BootstrapConfigTemplate.machineDeployment.template.annotation: "BootstrapConfigTemplate.machineDeployment.template.annotationValue"
+    spec:
+      joinConfiguration:
+        nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use.
+          kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work
+            eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: quick-start-mp-default-worker-bootstraptemplate
+  labels:
+    BootstrapConfigTemplate.machinePool.label: "BootstrapConfigTemplate.machinePool.labelValue"
+  annotations:
+    BootstrapConfigTemplate.machinePool.annotation: "BootstrapConfigTemplate.machinePool.annotationValue"
+spec:
+  template:
+    metadata:
+      labels:
+        BootstrapConfigTemplate.machinePool.template.label: "BootstrapConfigTemplate.machinePool.template.labelValue"
+      annotations:
+        BootstrapConfigTemplate.machinePool.template.annotation: "BootstrapConfigTemplate.machinePool.template.annotationValue"
+    spec:
+      joinConfiguration:
+        nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use.
+          kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work
+            eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
diff --git a/test/e2e/data/shared/main/metadata.yaml b/test/e2e/data/shared/main/metadata.yaml
index 3cd15b78330c..213d1ce823fa 100644
--- a/test/e2e/data/shared/main/metadata.yaml
+++ b/test/e2e/data/shared/main/metadata.yaml
@@ -1,6 +1,9 @@
 apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
 kind: Metadata
 releaseSeries:
+  - major: 1
+    minor: 9
+    contract: v1beta1
   - major: 1
     minor: 8
     contract: v1beta1
diff --git a/test/e2e/data/shared/v1.8/metadata.yaml b/test/e2e/data/shared/v1.8/metadata.yaml
new file mode 100644
index 000000000000..3cd15b78330c
--- /dev/null
+++ b/test/e2e/data/shared/v1.8/metadata.yaml
@@ -0,0 +1,36 @@
+apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
+kind: Metadata
+releaseSeries:
+  - major: 1
+    minor: 8
+    contract: v1beta1
+  - major: 1
+    minor: 7
+    contract: v1beta1
+  - major: 1
+    minor: 6
+    contract: v1beta1
+  - major: 1
+    minor: 5
+    contract: v1beta1
+  - major: 1
+    minor: 4
+    contract: v1beta1
+  - major: 1
+    minor: 3
+    contract: v1beta1
+  - major: 1
+    minor: 2
+    contract: v1beta1
+  - major: 1
+    minor: 1
+    contract: v1beta1
+  - major: 1
+    minor: 0
+    contract: v1beta1
+  - major: 0
+    minor: 4
+    contract: v1alpha4
+  - major: 0
+    minor: 3
+    contract: v1alpha3