diff --git a/.gitignore b/.gitignore index 1ada11246d..2f22dc158d 100644 --- a/.gitignore +++ b/.gitignore @@ -15,8 +15,10 @@ _artifacts/ # E2E test templates -test/e2e/data/infrastructure-vsphere/**/cluster-template*.yaml +#!test/e2e/data/infrastructure-vsphere/v*/base/cluster-template.yaml test/e2e/data/infrastructure-vsphere/main/**/clusterclass-quick-start.yaml +test/e2e/data/infrastructure-vsphere/main/**/cluster-template*.yaml +test/e2e/data/infrastructure-vsphere/*/cluster-template*.yaml test/e2e/data/infrastructure-vsphere/*/clusterclass-quick-start.yaml # env vars file used in getting-started.md and manifests generation diff --git a/Makefile b/Makefile index 2b7ab278c8..d32da6b72a 100644 --- a/Makefile +++ b/Makefile @@ -293,7 +293,7 @@ generate-doctoc: TRACE=$(TRACE) ./hack/generate-doctoc.sh .PHONY: generate-e2e-templates -generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v1.7 v1.8 main) ## Generate test templates for all branches +generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v1.8 v1.9 main) ## Generate test templates for all branches .PHONY: generate-e2e-templates-main generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the main branch @@ -321,16 +321,16 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/dhcp-overrides" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-dhcp-overrides.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/ownerreferences" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-ownerreferences.yaml" +.PHONY: generate-e2e-templates-v1.9 +generate-e2e-templates-v1.9: $(KUSTOMIZE) + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.9/clusterclass" > "$(E2E_TEMPLATE_DIR)/v1.9/clusterclass-quick-start.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build 
"$(E2E_TEMPLATE_DIR)/v1.9/workload" > "$(E2E_TEMPLATE_DIR)/v1.9/cluster-template-workload.yaml" + .PHONY: generate-e2e-templates-v1.8 generate-e2e-templates-v1.8: $(KUSTOMIZE) "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.8/clusterclass" > "$(E2E_TEMPLATE_DIR)/v1.8/clusterclass-quick-start.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.8/workload" > "$(E2E_TEMPLATE_DIR)/v1.8/cluster-template-workload.yaml" -.PHONY: generate-e2e-templates-v1.7 -generate-e2e-templates-v1.7: $(KUSTOMIZE) - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.7/clusterclass" > "$(E2E_TEMPLATE_DIR)/v1.7/clusterclass-quick-start.yaml" - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.7/workload" > "$(E2E_TEMPLATE_DIR)/v1.7/cluster-template-workload.yaml" - .PHONY: generate-test-infra-prowjobs generate-test-infra-prowjobs: $(PROWJOB_GEN) ## Generates the prowjob configurations in test-infra @if [ -z "${TEST_INFRA_DIR}" ]; then echo "TEST_INFRA_DIR is not set"; exit 1; fi diff --git a/clusterctl-settings.json b/clusterctl-settings.json index 06d51188fb..6ddc636e4a 100644 --- a/clusterctl-settings.json +++ b/clusterctl-settings.json @@ -2,6 +2,6 @@ "name": "infrastructure-vsphere", "config": { "componentsFile": "infrastructure-components.yaml", - "nextVersion": "v1.9.99" + "nextVersion": "v1.10.99" } } diff --git a/docs/release/release-tasks.md b/docs/release/release-tasks.md index dd79d68d6a..94e722738f 100644 --- a/docs/release/release-tasks.md +++ b/docs/release/release-tasks.md @@ -44,12 +44,13 @@ This comes down to changing occurrences of the old version to the new version, e 1. Create a new `v1.7` folder. It should be created based on the `main` folder and only contain the templates we use in the clusterctl upgrade tests (as of today `remote-management`). 2. Remove old folders that are not used anymore in clusterctl upgrade tests. - 5. 
Modify the test specs in `test/e2e/capi_clusterctl_upgrade_test.go` (according to the versions we want to test described above). + 3. Copy over the workload folder from a previous release. + 4. Modify the test specs in `test/e2e/clusterctl_upgrade_test.go` (according to the versions we want to test described above). Please note that both `InitWithKubernetesVersion` and `WorkloadKubernetesVersion` should be the highest mgmt cluster version supported by the respective Cluster API version. 2. Update `clusterctl-settings.json`: `v1.7.99` => `v1.8.99`. 3. Make sure all tests are green (also run `pull-cluster-api-provider-vsphere-e2e-full-main` and `pull-cluster-api-provider-vsphere-conformance-main`). -Prior art: TODO(sbueringer): link example PR +Prior art: [🌱 Prepare main for development of the new release](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/pull/2643) ## Remove previously deprecated code diff --git a/metadata.yaml b/metadata.yaml index b2d3e0c06a..bddb340f3f 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -36,3 +36,6 @@ releaseSeries: - major: 1 minor: 9 contract: v1beta1 + - major: 1 + minor: 10 + contract: v1beta1 diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index 61351895ed..41c8442caa 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -21,7 +21,7 @@ import ( capi_e2e "sigs.k8s.io/cluster-api/test/e2e" ) -var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8=>current, CAPI 1.5=>1.6) [ClusterClass]", func() { +var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9=>current, CAPI 1.6=>1.6) [ClusterClass]", func() { capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { return capi_e2e.ClusterctlUpgradeSpecInput{ E2EConfig: e2eConfig, @@ -30,22 +30,22 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8= ArtifactFolder: artifactFolder, SkipCleanup: 
skipCleanup, MgmtFlavor: "remote-management", - InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/clusterctl-{OS}-{ARCH}", - InitWithCoreProvider: "cluster-api:v1.5.4", - InitWithBootstrapProviders: []string{"kubeadm:v1.5.4"}, - InitWithControlPlaneProviders: []string{"kubeadm:v1.5.4"}, - InitWithInfrastructureProviders: []string{"vsphere:v1.8.4"}, + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.1/clusterctl-{OS}-{ARCH}", + InitWithCoreProvider: "cluster-api:v1.6.1", + InitWithBootstrapProviders: []string{"kubeadm:v1.6.1"}, + InitWithControlPlaneProviders: []string{"kubeadm:v1.6.1"}, + InitWithInfrastructureProviders: []string{"vsphere:v1.9.0"}, InitWithRuntimeExtensionProviders: []string{}, // InitWithKubernetesVersion should be the highest kubernetes version supported by the init Cluster API version. // This is to guarantee that both, the old and new CAPI version, support the defined version. - InitWithKubernetesVersion: "v1.28.0", - WorkloadKubernetesVersion: "v1.28.0", + InitWithKubernetesVersion: "v1.29.0", + WorkloadKubernetesVersion: "v1.29.0", WorkloadFlavor: "workload", } }) }) -var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.7=>current, CAPI 1.4=>1.6) [ClusterClass]", func() { +var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8=>current, CAPI 1.5=>1.6) [ClusterClass]", func() { capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { return capi_e2e.ClusterctlUpgradeSpecInput{ E2EConfig: e2eConfig, @@ -54,16 +54,16 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.7= ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, MgmtFlavor: "remote-management", - InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/clusterctl-{OS}-{ARCH}", - InitWithCoreProvider: "cluster-api:v1.4.9", - InitWithBootstrapProviders: 
[]string{"kubeadm:v1.4.9"}, - InitWithControlPlaneProviders: []string{"kubeadm:v1.4.9"}, - InitWithInfrastructureProviders: []string{"vsphere:v1.7.4"}, + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/clusterctl-{OS}-{ARCH}", + InitWithCoreProvider: "cluster-api:v1.5.4", + InitWithBootstrapProviders: []string{"kubeadm:v1.5.4"}, + InitWithControlPlaneProviders: []string{"kubeadm:v1.5.4"}, + InitWithInfrastructureProviders: []string{"vsphere:v1.8.4"}, InitWithRuntimeExtensionProviders: []string{}, // InitWithKubernetesVersion should be the highest kubernetes version supported by the init Cluster API version. // This is to guarantee that both, the old and new CAPI version, support the defined version. - InitWithKubernetesVersion: "v1.27.3", - WorkloadKubernetesVersion: "v1.27.3", + InitWithKubernetesVersion: "v1.28.0", + WorkloadKubernetesVersion: "v1.28.0", WorkloadFlavor: "workload", } }) diff --git a/test/e2e/config/vsphere-ci.yaml b/test/e2e/config/vsphere-ci.yaml index fbd4862977..688d527405 100644 --- a/test/e2e/config/vsphere-ci.yaml +++ b/test/e2e/config/vsphere-ci.yaml @@ -34,7 +34,7 @@ providers: type: "url" contract: v1beta1 files: - - sourcePath: "../data/shared/main/v1beta1/metadata.yaml" + - sourcePath: "../data/shared/v1.9/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -48,16 +48,6 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.9 - # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/core-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: BootstrapProvider @@ -68,7 +58,7 @@ providers: type: "url" contract: v1beta1 files: - - sourcePath: 
"../data/shared/main/v1beta1/metadata.yaml" + - sourcePath: "../data/shared/v1.9/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -82,16 +72,6 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.9 - # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/bootstrap-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: ControlPlaneProvider @@ -102,7 +82,7 @@ providers: type: "url" contract: v1beta1 files: - - sourcePath: "../data/shared/main/v1beta1/metadata.yaml" + - sourcePath: "../data/shared/v1.9/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -116,21 +96,11 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.9 - # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/control-plane-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: vsphere type: InfrastructureProvider versions: - - name: v1.9.99 + - name: v1.10.99 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/config/default contract: v1beta1 @@ -156,26 +126,26 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/clusterclass-quick-start.yaml" - sourcePath: "../data/shared/main/v1beta1_provider/metadata.yaml" - - name: v1.8.4 + - name: v1.9.0 # Use manifest from source 
files - value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.8.4/infrastructure-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.9.0/infrastructure-components.yaml" type: "url" contract: v1beta1 files: # Add a cluster template - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/cluster-template-workload.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/clusterclass-quick-start.yaml" - - sourcePath: "../data/shared/v1.8/v1beta1_provider/metadata.yaml" - - name: v1.7.4 + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.9/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.9/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/v1.9/v1beta1_provider/metadata.yaml" + - name: v1.8.4 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.7.4/infrastructure-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.8.4/infrastructure-components.yaml" type: "url" contract: v1beta1 files: # Add a cluster template - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.7/cluster-template-workload.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.7/clusterclass-quick-start.yaml" - - sourcePath: "../data/shared/v1.7/v1beta1_provider/metadata.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/v1.8/v1beta1_provider/metadata.yaml" variables: KUBERNETES_VERSION: "v1.29.0" diff --git a/test/e2e/config/vsphere-dev.yaml b/test/e2e/config/vsphere-dev.yaml index bb6eede9e4..41d6e64b21 100644 --- a/test/e2e/config/vsphere-dev.yaml +++ 
b/test/e2e/config/vsphere-dev.yaml @@ -37,7 +37,7 @@ providers: type: "url" contract: v1beta1 files: - - sourcePath: "../data/shared/main/v1beta1/metadata.yaml" + - sourcePath: "../data/shared/v1.9/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -51,16 +51,6 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.9 - # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/core-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: BootstrapProvider @@ -71,7 +61,7 @@ providers: type: "url" contract: v1beta1 files: - - sourcePath: "../data/shared/main/v1beta1/metadata.yaml" + - sourcePath: "../data/shared/v1.9/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -85,16 +75,6 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.9 - # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/bootstrap-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: ControlPlaneProvider @@ -105,7 +85,7 @@ providers: type: "url" contract: v1beta1 files: - - sourcePath: "../data/shared/main/v1beta1/metadata.yaml" + - sourcePath: "../data/shared/v1.9/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -119,21 +99,11 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.9 - # 
Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/control-plane-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: vsphere type: InfrastructureProvider versions: - - name: v1.9.99 + - name: v1.10.99 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/config/default contract: v1beta1 @@ -159,26 +129,26 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/clusterclass-quick-start.yaml" - sourcePath: "../data/shared/main/v1beta1_provider/metadata.yaml" - - name: v1.8.4 + - name: v1.9.0 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.8.4/infrastructure-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.9.0/infrastructure-components.yaml" type: "url" contract: v1beta1 files: # Add a cluster template - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/cluster-template-workload.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/clusterclass-quick-start.yaml" - - sourcePath: "../data/shared/v1.8/v1beta1_provider/metadata.yaml" - - name: v1.7.4 + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.9/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.9/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/v1.9/v1beta1_provider/metadata.yaml" + - name: v1.8.4 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.7.4/infrastructure-components.yaml" + value: 
"https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.8.4/infrastructure-components.yaml" type: "url" contract: v1beta1 files: # Add a cluster template - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.7/cluster-template-workload.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.7/clusterclass-quick-start.yaml" - - sourcePath: "../data/shared/v1.7/v1beta1_provider/metadata.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/v1.8/v1beta1_provider/metadata.yaml" variables: KUBERNETES_VERSION: "v1.29.0" diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/commons/remove-storage-policy.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/commons/remove-storage-policy.yaml deleted file mode 100644 index 9e0cac085c..0000000000 --- a/test/e2e/data/infrastructure-vsphere/v1.7/commons/remove-storage-policy.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- op: remove - path: /spec/template/spec/storagePolicyName diff --git a/test/e2e/data/infrastructure-vsphere/v1.9/base/cluster-template.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/base/cluster-template.yaml new file mode 100644 index 0000000000..6113fc7dd6 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.9/base/cluster-template.yaml @@ -0,0 +1,1481 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: '${CLUSTER_NAME}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: '${CLUSTER_NAME}' +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: 6443 + identityRef: + kind: Secret + name: '${CLUSTER_NAME}' + server: '${VSPHERE_SERVER}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + powerOffMode: trySoft + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + powerOffMode: trySoft + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + files: + - 
content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE:=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "true" + - name: svc_election + value: "true" + image: ghcr.io/kube-vip/kube-vip:v0.6.3 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + - mountPath: /etc/hosts + name: etchosts + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: File + name: kubeconfig + - hostPath: + path: /etc/kube-vip.hosts + type: File + name: etchosts + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + permissions: "0644" + - content: 127.0.0.1 localhost kubernetes + owner: root:root + path: /etc/kube-vip.hosts + permissions: "0644" + - content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + return + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + path: /etc/kube-vip-prepare.sh + permissions: "0700" + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + - /etc/kube-vip-prepare.sh + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: '${CLUSTER_NAME}' + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: '${KUBERNETES_VERSION}' +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: '${CLUSTER_NAME}-md-0' + 
namespace: '${NAMESPACE}' +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}-md-0' + namespace: '${NAMESPACE}' +spec: + clusterName: '${CLUSTER_NAME}' + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: '${CLUSTER_NAME}-md-0' + clusterName: '${CLUSTER_NAME}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_NAME}-worker + version: '${KUBERNETES_VERSION}' +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-config-secret + - kind: ConfigMap + name: csi-manifests + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + 
name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +stringData: + password: ${VSPHERE_PASSWORD} + username: ${VSPHERE_USERNAME} +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-config-secret + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: vsphere-config-secret + namespace: vmware-system-csi + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: v1 + kind: Namespace + metadata: + name: vmware-system-csi + --- + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - "" + resources: + - nodes + - pods + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - update + - delete + - patch + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - 
update + - create + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - patch + - apiGroups: + - cns.vmware.com + resources: + - triggercsifullsyncs + verbs: + - create + - get + - update + - watch + - list + - apiGroups: + - cns.vmware.com + resources: + - cnsvspherevolumemigrations + verbs: + - create + - get + - list + - watch + - update + - delete + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeinfoes + verbs: + - create + - get + - list + - watch + - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - create + - update + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeoperationrequests + verbs: + - create + - get + - list + - update + - delete + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - watch + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - patch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + - patch + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - get + - update + - watch + - list + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: ServiceAccount + 
metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-node-cluster-role + rules: + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - create + - watch + - get + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-node-cluster-role-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-node-cluster-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: vsphere-csi-node-role + namespace: vmware-system-csi + rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: vsphere-csi-node-binding + namespace: vmware-system-csi + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: vsphere-csi-node-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: v1 + data: + async-query-volume: "true" + block-volume-snapshot: "true" + cnsmgr-suspend-create-volume: "true" + csi-auth-check: "true" + csi-internal-generated-cluster-id: "true" + csi-migration: "true" + csi-windows-support: "true" + list-volumes: "true" + listview-tasks: "true" + max-pvscsi-targets-per-vm: "true" + multi-vcenter-csi-topology: "true" + online-volume-extend: "true" + pv-to-backingdiskobjectid-mapping: "false" + topology-preferential-datastores: "true" + trigger-csi-fullsync: "false" + kind: ConfigMap + metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: Service + metadata: + labels: + app: vsphere-csi-controller 
+ name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + ports: + - name: ctlr + port: 2112 + protocol: TCP + targetPort: 2112 + - name: syncer + port: 2113 + protocol: TCP + targetPort: 2113 + selector: + app: vsphere-csi-controller + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vsphere-csi-controller + topologyKey: kubernetes.io/hostname + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --kube-api-qps=100 + - --kube-api-burst=100 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v4.3.0 + name: csi-attacher + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --timeout=300s + - --handle-volume-inuse-error=false + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0 + name: csi-resizer + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: controller + - name: 
X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + periodSeconds: 180 + timeoutSeconds: 10 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + - containerPort: 2112 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --leader-election + - --leader-election-lease-duration=30s + - --leader-election-renew-deadline=20s + - --leader-election-retry-period=10s + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: GODEBUG + value: x509sha1=1 + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.1.0 + imagePullPolicy: Always + name: vsphere-syncer + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + 
securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0 + name: csi-provisioner + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --kube-api-qps=100 + - --kube-api-burst=100 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2 + name: csi-snapshotter + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + priorityClassName: system-cluster-critical + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - emptyDir: {} + name: socket-dir + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: 
/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: GODEBUG + value: x509sha1=1 + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - mountPath: /sys/block + name: blocks-dir + - mountPath: /sys/devices + name: sys-devices-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 + name: liveness-probe + volumeMounts: 
+ - mountPath: /csi + name: plugin-dir + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + - hostPath: + path: /sys/block + type: Directory + name: blocks-dir + - hostPath: + path: /sys/devices + type: Directory + name: sys-devices-dir + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node-windows + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node-windows + template: + metadata: + labels: + app: vsphere-csi-node-windows + role: vsphere-csi-windows + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - 
--fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: DEBUG + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + - mountPath: C:\var\lib\kubelet + name: pods-mount-dir + - mountPath: \\.\pipe\csi-proxy-volume-v1 + name: csi-proxy-volume-v1 + - mountPath: \\.\pipe\csi-proxy-filesystem-v1 + name: csi-proxy-filesystem-v1 + - mountPath: \\.\pipe\csi-proxy-disk-v1 + name: csi-proxy-disk-v1 + - mountPath: \\.\pipe\csi-proxy-system-v1alpha1 + name: csi-proxy-system-v1alpha1 + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: windows + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + name: registration-dir + - hostPath: + path: C:\var\lib\kubelet\plugins\csi.vsphere.vmware.com\ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: 
+ path: \var\lib\kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + type: "" + name: csi-proxy-disk-v1 + - hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + type: "" + name: csi-proxy-volume-v1 + - hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + type: "" + name: csi-proxy-filesystem-v1 + - hostPath: + path: \\.\pipe\csi-proxy-system-v1alpha1 + type: "" + name: csi-proxy-system-v1alpha1 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate +kind: ConfigMap +metadata: + name: csi-manifests + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: service-account + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: ${VSPHERE_PASSWORD} + ${VSPHERE_SERVER}.username: ${VSPHERE_USERNAME} + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - 
patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + vcenter: + ${VSPHERE_SERVER}: + datacenters: + - '${VSPHERE_DATACENTER}' + server: '${VSPHERE_SERVER}' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role-binding + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + component: cloud-controller-manager + tier: control-plane + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + name: vsphere-cloud-controller-manager + template: + metadata: + labels: + component: cloud-controller-manager + name: vsphere-cloud-controller-manager + tier: control-plane + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: gcr.io/cloud-provider-vsphere/cpi/release/manager:${CPI_IMAGE_K8S_VERSION} + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + priorityClassName: system-node-critical + securityContext: + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: '${NAMESPACE}' diff --git a/test/e2e/data/infrastructure-vsphere/v1.9/base/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/base/kustomization.yaml new file mode 100644 index 0000000000..0639bc9b15 --- /dev/null +++ 
b/test/e2e/data/infrastructure-vsphere/v1.9/base/kustomization.yaml @@ -0,0 +1,15 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - cluster-template.yaml + - ../commons/cluster-resource-set.yaml +patchesStrategicMerge: + - ../commons/cluster-resource-set-label.yaml + - ../commons/cluster-network-CIDR.yaml + - ../commons/cluster-resource-set-csi-insecure.yaml +patches: + # We are dropping storage policy so we also have test coverage + # for normal provisioning via data stores. + - target: + kind: VSphereMachineTemplate + path: ../commons/remove-storage-policy.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/clusterclass-quick-start.yaml similarity index 75% rename from test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/clusterclass-quick-start.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/clusterclass-quick-start.yaml index 4ba06d45f5..709ed2f482 100644 --- a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/clusterclass-quick-start.yaml @@ -37,6 +37,9 @@ spec: - op: add path: /spec/template/spec/kubeadmConfigSpec/files value: [] + - op: add + path: /spec/template/spec/kubeadmConfigSpec/postKubeadmCommands + value: [] selector: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlaneTemplate @@ -46,6 +49,9 @@ spec: - op: add path: /spec/template/spec/files value: [] + - op: add + path: /spec/template/spec/postKubeadmCommands + value: [] selector: apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate @@ -53,7 +59,7 @@ spec: machineDeploymentClass: names: - ${CLUSTER_CLASS_NAME}-worker - name: createFilesArray + name: createEmptyArrays - definitions: - jsonPatches: - op: add @@ -121,9 +127,73 @@ spec: path: /spec/template/spec/kubeadmConfigSpec/files/- valueFrom: 
template: |- - owner: root:root - path: "/etc/kubernetes/manifests/kube-vip.yaml" + owner: "root:root" + path: "/etc/kubernetes/manifests/kube-vip.yaml" content: {{ printf "%q" (regexReplaceAll "(name: address\n +value:).*" .kubeVipPodManifest (printf "$1 %s" .controlPlaneIpAddr)) }} + permissions: "0644" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + content: 127.0.0.1 localhost kubernetes + owner: root:root + path: /etc/kube-vip.hosts + permissions: "0644" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' 
-f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + return + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + path: /etc/kube-vip-prepare.sh + permissions: "0700" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/preKubeadmCommands/- + valueFrom: + template: /etc/kube-vip-prepare.sh selector: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlaneTemplate @@ -203,6 +273,7 @@ spec: networkName: '${VSPHERE_NETWORK}' numCPUs: 2 os: Linux + powerOffMode: trySoft resourcePool: '${VSPHERE_RESOURCE_POOL}' server: '${VSPHERE_SERVER}' storagePolicyName: '${VSPHERE_STORAGE_POLICY}' @@ -229,6 +300,7 @@ spec: networkName: '${VSPHERE_NETWORK}' numCPUs: 2 os: Linux + powerOffMode: trySoft resourcePool: '${VSPHERE_RESOURCE_POOL}' server: '${VSPHERE_SERVER}' storagePolicyName: '${VSPHERE_STORAGE_POLICY}' diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/kustomization.yaml similarity index 78% rename from test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/kustomization.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/kustomization.yaml index 820776eeaa..4c0e41b050 100644 --- a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/kustomization.yaml +++ b/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/kustomization.yaml @@ -9,3 +9,6 @@ patches: - target: kind: ClusterClass path: ./patch-prekubeadmscript.yaml + - target: + kind: ClusterClass + path: ./patch-namingstrategy.yaml diff --git 
a/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/patch-namingstrategy.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/patch-namingstrategy.yaml new file mode 100644 index 0000000000..1877801c3c --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/patch-namingstrategy.yaml @@ -0,0 +1,8 @@ +- op: add + path: /spec/controlPlane/namingStrategy + value: + template: '{{ .cluster.name }}-cp-{{ .random }}' +- op: add + path: /spec/workers/machineDeployments/0/namingStrategy + value: + template: '{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}' diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/patch-prekubeadmscript.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-prekubeadmscript.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/patch-prekubeadmscript.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/patch-vsphere-template.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-vsphere-template.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/clusterclass/patch-vsphere-template.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-network-CIDR.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-network-CIDR.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-network-CIDR.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-network-CIDR.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-resource-set-csi-insecure.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-resource-set-csi-insecure.yaml similarity index 85% rename from 
test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-resource-set-csi-insecure.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-resource-set-csi-insecure.yaml index 86c659694a..6edd2a37a0 100644 --- a/test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-resource-set-csi-insecure.yaml +++ b/test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-resource-set-csi-insecure.yaml @@ -1,15 +1,15 @@ apiVersion: v1 kind: Secret metadata: - name: csi-vsphere-config + name: vsphere-config-secret namespace: '${NAMESPACE}' stringData: data: | apiVersion: v1 kind: Secret metadata: - name: csi-vsphere-config - namespace: kube-system + name: vsphere-config-secret + namespace: vmware-system-csi stringData: csi-vsphere.conf: |+ [Global] diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-resource-set-label.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-resource-set-label.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-resource-set-label.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-resource-set-label.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-resource-set.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-resource-set.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere/v1.7/commons/cluster-resource-set.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/commons/cluster-resource-set.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.9/commons/remove-storage-policy.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/commons/remove-storage-policy.yaml new file mode 100644 index 0000000000..4777dcaa03 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.9/commons/remove-storage-policy.yaml @@ -0,0 +1,2 @@ +- op: remove + path: /spec/template/spec/storagePolicyName \ No newline at end of file diff --git 
a/test/e2e/data/infrastructure-vsphere/v1.7/base/cluster-template-topology.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/topology/cluster-template-topology.yaml similarity index 52% rename from test/e2e/data/infrastructure-vsphere/v1.7/base/cluster-template-topology.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/topology/cluster-template-topology.yaml index 078653a4be..54c1fc98be 100644 --- a/test/e2e/data/infrastructure-vsphere/v1.7/base/cluster-template-topology.yaml +++ b/test/e2e/data/infrastructure-vsphere/v1.9/topology/cluster-template-topology.yaml @@ -33,7 +33,7 @@ spec: - name: cp_enable value: "true" - name: vip_interface - value: ${VIP_NETWORK_INTERFACE=""} + value: ${VIP_NETWORK_INTERFACE:=""} - name: address value: ${CONTROL_PLANE_ENDPOINT_IP} - name: port @@ -48,7 +48,11 @@ spec: value: "10" - name: vip_retryperiod value: "2" - image: ghcr.io/kube-vip/kube-vip:v0.5.11 + - name: svc_enable + value: "true" + - name: svc_election + value: "true" + image: ghcr.io/kube-vip/kube-vip:v0.6.3 imagePullPolicy: IfNotPresent name: kube-vip resources: {} @@ -60,16 +64,18 @@ spec: volumeMounts: - mountPath: /etc/kubernetes/admin.conf name: kubeconfig - hostAliases: - - hostnames: - - kubernetes - ip: 127.0.0.1 + - mountPath: /etc/hosts + name: etchosts hostNetwork: true volumes: - hostPath: path: /etc/kubernetes/admin.conf - type: FileOrCreate + type: File name: kubeconfig + - hostPath: + path: /etc/kube-vip.hosts + type: File + name: etchosts - name: controlPlaneIpAddr value: ${CONTROL_PLANE_ENDPOINT_IP} - name: credsSecretName @@ -104,19 +110,9 @@ spec: cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' resources: - kind: Secret - name: vsphere-csi-controller - - kind: ConfigMap - name: vsphere-csi-controller-role - - kind: ConfigMap - name: vsphere-csi-controller-binding - - kind: Secret - name: csi-vsphere-config - - kind: ConfigMap - name: csi.vsphere.vmware.com - - kind: ConfigMap - name: vsphere-csi-node + name: vsphere-config-secret - kind: 
ConfigMap - name: vsphere-csi-controller + name: csi-manifests - kind: Secret name: cloud-controller-manager - kind: Secret @@ -127,80 +123,126 @@ spec: apiVersion: v1 kind: Secret metadata: - name: vsphere-csi-controller + name: vsphere-config-secret namespace: '${NAMESPACE}' stringData: data: | apiVersion: v1 - kind: ServiceAccount + kind: Secret metadata: - name: vsphere-csi-controller - namespace: kube-system + name: vsphere-config-secret + namespace: vmware-system-csi + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque type: addons.cluster.x-k8s.io/resource-set --- apiVersion: v1 data: data: | + --- + apiVersion: v1 + kind: Namespace + metadata: + name: vmware-system-csi + --- + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: vsphere-csi-controller-role rules: - apiGroups: - - storage.k8s.io + - "" resources: - - csidrivers + - nodes + - pods verbs: - - create - - delete + - get + - list + - watch - apiGroups: - "" resources: - - nodes - - pods - - secrets - configmaps verbs: - get - list - watch + - create - apiGroups: - "" resources: - - persistentvolumes + - persistentvolumeclaims verbs: - get - list - watch - update - - create - - delete + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: - patch - apiGroups: - - storage.k8s.io + - "" resources: - - volumeattachments + - persistentvolumes verbs: - get - list - watch + - create - update + - delete - patch - 
apiGroups: - - storage.k8s.io + - "" resources: - - volumeattachments/status + - events verbs: + - get + - list + - watch + - create + - update - patch - apiGroups: - - "" + - coordination.k8s.io resources: - - persistentvolumeclaims + - leases verbs: - get - - list - watch + - list + - delete - update + - create - apiGroups: - storage.k8s.io resources: @@ -211,26 +253,69 @@ data: - list - watch - apiGroups: - - "" + - storage.k8s.io resources: - - events + - volumeattachments verbs: + - get - list - watch + - patch + - apiGroups: + - cns.vmware.com + resources: + - triggercsifullsyncs + verbs: - create + - get - update - - patch + - watch + - list - apiGroups: - - coordination.k8s.io + - cns.vmware.com resources: - - leases + - cnsvspherevolumemigrations verbs: + - create - get + - list - watch + - update + - delete + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeinfoes + verbs: + - create + - get - list + - watch - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - create - update + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeoperationrequests + verbs: - create + - get + - list + - update + - delete - apiGroups: - snapshot.storage.k8s.io resources: @@ -238,21 +323,43 @@ data: verbs: - get - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - watch + - get + - list - apiGroups: - snapshot.storage.k8s.io resources: - volumesnapshotcontents verbs: + - create - get - list -kind: ConfigMap -metadata: - name: vsphere-csi-controller-role - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -data: - data: | + - watch + - update + - delete + - patch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + - patch + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - get 
+ - update + - watch + - list + --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -264,118 +371,409 @@ data: subjects: - kind: ServiceAccount name: vsphere-csi-controller - namespace: kube-system -kind: ConfigMap -metadata: - name: vsphere-csi-controller-binding - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -kind: Secret -metadata: - name: csi-vsphere-config - namespace: '${NAMESPACE}' -stringData: - data: | + namespace: vmware-system-csi + --- apiVersion: v1 - kind: Secret + kind: ServiceAccount metadata: - name: csi-vsphere-config - namespace: kube-system - stringData: - csi-vsphere.conf: |+ - [Global] - thumbprint = "${VSPHERE_TLS_THUMBPRINT}" - cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" - - [VirtualCenter "${VSPHERE_SERVER}"] - user = "${VSPHERE_USERNAME}" - password = "${VSPHERE_PASSWORD}" - datacenters = "${VSPHERE_DATACENTER}" - - [Network] - public-network = "${VSPHERE_NETWORK}" - - type: Opaque -type: addons.cluster.x-k8s.io/resource-set ---- -apiVersion: v1 -data: - data: | - apiVersion: storage.k8s.io/v1 - kind: CSIDriver + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole metadata: - name: csi.vsphere.vmware.com - spec: - attachRequired: true -kind: ConfigMap -metadata: - name: csi.vsphere.vmware.com - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -data: - data: | - apiVersion: apps/v1 - kind: DaemonSet + name: vsphere-csi-node-cluster-role + rules: + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - create + - watch + - get + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding metadata: + name: vsphere-csi-node-cluster-role-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-node-cluster-role + subjects: + - kind: ServiceAccount name: vsphere-csi-node - namespace: kube-system - 
spec: - selector: - matchLabels: - app: vsphere-csi-node - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - containers: - - args: - - --v=5 - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - env: - - name: ADDRESS + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: vsphere-csi-node-role + namespace: vmware-system-csi + rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: vsphere-csi-node-binding + namespace: vmware-system-csi + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: vsphere-csi-node-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: v1 + data: + async-query-volume: "true" + block-volume-snapshot: "true" + cnsmgr-suspend-create-volume: "true" + csi-auth-check: "true" + csi-internal-generated-cluster-id: "true" + csi-migration: "true" + csi-windows-support: "true" + list-volumes: "true" + listview-tasks: "true" + max-pvscsi-targets-per-vm: "true" + multi-vcenter-csi-topology: "true" + online-volume-extend: "true" + pv-to-backingdiskobjectid-mapping: "false" + topology-preferential-datastores: "true" + trigger-csi-fullsync: "false" + kind: ConfigMap + metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: Service + metadata: + labels: + app: vsphere-csi-controller + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + ports: + - name: ctlr + port: 2112 + protocol: TCP + targetPort: 2112 + - name: syncer + port: 2113 + protocol: TCP + targetPort: 2113 + selector: + app: vsphere-csi-controller + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + 
replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vsphere-csi-controller + topologyKey: kubernetes.io/hostname + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --kube-api-qps=100 + - --kube-api-burst=100 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v4.3.0 + name: csi-attacher + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --timeout=300s + - --handle-volume-inuse-error=false + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0 + name: csi-resizer + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: controller + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: 
gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + periodSeconds: 180 + timeoutSeconds: 10 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + - containerPort: 2112 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --leader-election + - --leader-election-lease-duration=30s + - --leader-election-renew-deadline=20s + - --leader-election-retry-period=10s + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: GODEBUG + value: x509sha1=1 + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.1.0 + imagePullPolicy: Always + name: vsphere-syncer + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - 
--leader-election-retry-period=30s + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0 + name: csi-provisioner + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --kube-api-qps=100 + - --kube-api-burst=100 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2 + name: csi-snapshotter + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + priorityClassName: system-cluster-critical + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - emptyDir: {} + name: socket-dir + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS value: /csi/csi.sock - name: DRIVER_REG_SOCK_PATH value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock - image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - 
--kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 name: node-driver-registrar - resources: {} - securityContext: - privileged: true volumeMounts: - mountPath: /csi name: plugin-dir - mountPath: /registration name: registration-dir - - env: + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName - name: CSI_ENDPOINT value: unix:///csi/csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" - name: X_CSI_MODE value: node - name: X_CSI_SPEC_REQ_VALIDATION value: "false" - - name: VSPHERE_CSI_CONFIG - value: /etc/cloud/csi-vsphere.conf + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" - name: LOGGER_LEVEL value: PRODUCTION - - name: X_CSI_LOG_LEVEL - value: INFO - - name: NODE_NAME + - name: GODEBUG + value: x509sha1=1 + - name: CSI_NAMESPACE valueFrom: fieldRef: - fieldPath: spec.nodeName - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always livenessProbe: failureThreshold: 3 httpGet: @@ -383,13 +781,12 @@ data: port: healthz initialDelaySeconds: 10 periodSeconds: 5 - timeoutSeconds: 3 + timeoutSeconds: 5 name: vsphere-csi-node ports: - containerPort: 9808 name: healthz protocol: TCP - resources: {} securityContext: allowPrivilegeEscalation: true capabilities: @@ -397,8 +794,6 @@ data: - SYS_ADMIN privileged: true volumeMounts: - - mountPath: /etc/cloud - name: vsphere-config-volume - mountPath: /csi name: plugin-dir - mountPath: /var/lib/kubelet @@ -406,30 +801,36 @@ data: name: pods-mount-dir - mountPath: /dev name: device-dir + - mountPath: /sys/block + name: blocks-dir + - mountPath: /sys/devices + name: sys-devices-dir - args: + - --v=4 - 
--csi-address=/csi/csi.sock - image: quay.io/k8scsi/livenessprobe:v2.1.0 + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 name: liveness-probe - resources: {} volumeMounts: - mountPath: /csi name: plugin-dir - dnsPolicy: Default + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node tolerations: - - effect: NoSchedule - operator: Exists - effect: NoExecute operator: Exists + - effect: NoSchedule + operator: Exists volumes: - - name: vsphere-config-volume - secret: - secretName: csi-vsphere-config - hostPath: path: /var/lib/kubelet/plugins_registry type: Directory name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com type: DirectoryOrCreate name: plugin-dir - hostPath: @@ -439,59 +840,89 @@ data: - hostPath: path: /dev name: device-dir + - hostPath: + path: /sys/block + type: Directory + name: blocks-dir + - hostPath: + path: /sys/devices + type: Directory + name: sys-devices-dir updateStrategy: + rollingUpdate: + maxUnavailable: 1 type: RollingUpdate -kind: ConfigMap -metadata: - name: vsphere-csi-node - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -data: - data: | + --- apiVersion: apps/v1 - kind: Deployment + kind: DaemonSet metadata: - name: vsphere-csi-controller - namespace: kube-system + name: vsphere-csi-node-windows + namespace: vmware-system-csi spec: - replicas: 1 selector: matchLabels: - app: vsphere-csi-controller + app: vsphere-csi-node-windows template: metadata: labels: - app: vsphere-csi-controller - role: vsphere-csi + app: vsphere-csi-node-windows + role: vsphere-csi-windows spec: containers: - args: - - --v=4 - - --timeout=300s + - --v=5 - --csi-address=$(ADDRESS) - - --leader-election + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) env: - name: ADDRESS - value: /csi/csi.sock - image: 
quay.io/k8scsi/csi-attacher:v3.0.0 - name: csi-attacher - resources: {} + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar volumeMounts: - mountPath: /csi - name: socket-dir - - env: + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + value: unix://C:\\csi\\csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" - name: X_CSI_MODE - value: controller - - name: VSPHERE_CSI_CONFIG - value: /etc/cloud/csi-vsphere.conf + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" - name: LOGGER_LEVEL value: PRODUCTION - name: X_CSI_LOG_LEVEL - value: INFO - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + value: DEBUG + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always livenessProbe: failureThreshold: 3 httpGet: @@ -499,79 +930,78 @@ data: port: healthz initialDelaySeconds: 10 periodSeconds: 5 - timeoutSeconds: 3 - name: vsphere-csi-controller + timeoutSeconds: 5 + name: vsphere-csi-node ports: - containerPort: 9808 name: healthz protocol: TCP - resources: {} volumeMounts: - - mountPath: /etc/cloud - name: 
vsphere-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - args: - - --csi-address=$(ADDRESS) - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - image: quay.io/k8scsi/livenessprobe:v2.1.0 - name: liveness-probe - resources: {} - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - args: - - --leader-election - env: - - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: LOGGER_LEVEL - value: PRODUCTION - - name: VSPHERE_CSI_CONFIG - value: /etc/cloud/csi-vsphere.conf - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0 - name: vsphere-syncer - resources: {} - volumeMounts: - - mountPath: /etc/cloud - name: vsphere-config-volume - readOnly: true + - mountPath: C:\csi + name: plugin-dir + - mountPath: C:\var\lib\kubelet + name: pods-mount-dir + - mountPath: \\.\pipe\csi-proxy-volume-v1 + name: csi-proxy-volume-v1 + - mountPath: \\.\pipe\csi-proxy-filesystem-v1 + name: csi-proxy-filesystem-v1 + - mountPath: \\.\pipe\csi-proxy-disk-v1 + name: csi-proxy-disk-v1 + - mountPath: \\.\pipe\csi-proxy-system-v1alpha1 + name: csi-proxy-system-v1alpha1 - args: - --v=4 - - --timeout=300s - - --csi-address=$(ADDRESS) - - --leader-election - - --default-fstype=ext4 - env: - - name: ADDRESS - value: /csi/csi.sock - image: quay.io/k8scsi/csi-provisioner:v2.0.0 - name: csi-provisioner - resources: {} + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 + name: liveness-probe volumeMounts: - mountPath: /csi - name: socket-dir - dnsPolicy: Default - serviceAccountName: vsphere-csi-controller + name: plugin-dir + nodeSelector: + kubernetes.io/os: windows + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master + - effect: NoExecute operator: Exists - effect: NoSchedule - key: node-role.kubernetes.io/control-plane 
operator: Exists volumes: - - name: vsphere-config-volume - secret: - secretName: csi-vsphere-config - - emptyDir: {} - name: socket-dir + - hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + name: registration-dir + - hostPath: + path: C:\var\lib\kubelet\plugins\csi.vsphere.vmware.com\ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: \var\lib\kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + type: "" + name: csi-proxy-disk-v1 + - hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + type: "" + name: csi-proxy-volume-v1 + - hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + type: "" + name: csi-proxy-filesystem-v1 + - hostPath: + path: \\.\pipe\csi-proxy-system-v1alpha1 + type: "" + name: csi-proxy-system-v1alpha1 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate kind: ConfigMap metadata: - name: vsphere-csi-controller + name: csi-manifests namespace: '${NAMESPACE}' --- apiVersion: v1 diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/topology/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/topology/kustomization.yaml similarity index 70% rename from test/e2e/data/infrastructure-vsphere/v1.7/topology/kustomization.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/topology/kustomization.yaml index cda508b384..76ce47f3e9 100644 --- a/test/e2e/data/infrastructure-vsphere/v1.7/topology/kustomization.yaml +++ b/test/e2e/data/infrastructure-vsphere/v1.9/topology/kustomization.yaml @@ -1,9 +1,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ../base/cluster-template-topology.yaml + - cluster-template-topology.yaml - ../commons/cluster-resource-set.yaml patchesStrategicMerge: - ../commons/cluster-resource-set-label.yaml - ../commons/cluster-network-CIDR.yaml - - ../commons/cluster-resource-set-csi-insecure.yaml + - ../commons/cluster-resource-set-csi-insecure.yaml \ No newline at end of file diff 
--git a/test/e2e/data/infrastructure-vsphere/v1.7/workload/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/workload/kustomization.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere/v1.7/workload/kustomization.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/workload/kustomization.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/workload/workload-control-plane-endpoint-ip.yaml b/test/e2e/data/infrastructure-vsphere/v1.9/workload/workload-control-plane-endpoint-ip.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere/v1.7/workload/workload-control-plane-endpoint-ip.yaml rename to test/e2e/data/infrastructure-vsphere/v1.9/workload/workload-control-plane-endpoint-ip.yaml diff --git a/test/e2e/data/shared/main/v1beta1_provider/metadata.yaml b/test/e2e/data/shared/main/v1beta1_provider/metadata.yaml index e0b9aeaf60..b34a660d3c 100644 --- a/test/e2e/data/shared/main/v1beta1_provider/metadata.yaml +++ b/test/e2e/data/shared/main/v1beta1_provider/metadata.yaml @@ -6,13 +6,13 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 kind: Metadata releaseSeries: - - major: 1 - minor: 7 - contract: v1beta1 - major: 1 minor: 8 contract: v1beta1 - major: 1 minor: 9 contract: v1beta1 + - major: 1 + minor: 10 + contract: v1beta1 diff --git a/test/e2e/data/shared/v1.7/v1beta1/metadata.yaml b/test/e2e/data/shared/v1.9/v1beta1/metadata.yaml similarity index 85% rename from test/e2e/data/shared/v1.7/v1beta1/metadata.yaml rename to test/e2e/data/shared/v1.9/v1beta1/metadata.yaml index 9db497c750..55227b64ef 100644 --- a/test/e2e/data/shared/v1.7/v1beta1/metadata.yaml +++ b/test/e2e/data/shared/v1.9/v1beta1/metadata.yaml @@ -6,6 +6,12 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 kind: Metadata releaseSeries: + - major: 1 + minor: 6 + contract: v1beta1 + - major: 1 + minor: 5 + contract: v1beta1 - major: 1 minor: 4 contract: v1beta1 @@ -21,4 +27,3 @@ releaseSeries: - major: 1 minor: 0 contract: v1beta1 - 
diff --git a/test/e2e/data/shared/v1.7/v1beta1_provider/metadata.yaml b/test/e2e/data/shared/v1.9/v1beta1_provider/metadata.yaml similarity index 85% rename from test/e2e/data/shared/v1.7/v1beta1_provider/metadata.yaml rename to test/e2e/data/shared/v1.9/v1beta1_provider/metadata.yaml index ee5bed9c49..77204dba7a 100644 --- a/test/e2e/data/shared/v1.7/v1beta1_provider/metadata.yaml +++ b/test/e2e/data/shared/v1.9/v1beta1_provider/metadata.yaml @@ -7,5 +7,9 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 kind: Metadata releaseSeries: - major: 1 - minor: 7 + minor: 8 contract: v1beta1 + - major: 1 + minor: 9 + contract: v1beta1 + diff --git a/test/integration/integration-dev.yaml b/test/integration/integration-dev.yaml index e4198b6ac6..bf31acb60f 100644 --- a/test/integration/integration-dev.yaml +++ b/test/integration/integration-dev.yaml @@ -72,7 +72,7 @@ providers: - name: vsphere type: InfrastructureProvider versions: - - name: v1.9.99 + - name: v1.10.99 # Use manifest from source files value: ../../../cluster-api-provider-vsphere/config/deployments/integration-tests contract: v1beta1 diff --git a/tilt-provider.json b/tilt-provider.json index 22d44d7fc5..ace5e31565 100644 --- a/tilt-provider.json +++ b/tilt-provider.json @@ -1,7 +1,7 @@ { "name": "vsphere", "config": { - "version": "v1.9.99", + "version": "v1.10.99", "image": "gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller", "live_reload_deps": [ "main.go",