From 01742e5724533002741b9297910186f40524d134 Mon Sep 17 00:00:00 2001
From: Matthias Teich
Date: Mon, 3 Jun 2024 13:39:26 +0200
Subject: [PATCH 01/13] :sparkles: Add IPAM for nodes

---
 api/v1alpha1/ionoscloudmachine_types.go       |  20 +-
 api/v1alpha1/ionoscloudmachine_types_test.go  |  51 +++
 api/v1alpha1/ipam_types.go                    |  40 ++
 api/v1alpha1/suite_test.go                    |   2 +
 api/v1alpha1/zz_generated.deepcopy.go         |  36 +-
 cmd/main.go                                   |   3 +
 ...e.cluster.x-k8s.io_ionoscloudmachines.yaml | 135 +++++-
 ...r.x-k8s.io_ionoscloudmachinetemplates.yaml | 126 ++++++
 config/rbac/role.yaml                         |  19 +
 .../ionoscloudmachine_controller.go           |   9 +-
 internal/service/cloud/server.go              |  46 +-
 internal/service/cloud/suite_test.go          |   2 +
 internal/service/ipam/ipam.go                 | 276 ++++++++++++
 internal/service/ipam/ipam_test.go            | 405 ++++++++++++++++++
 14 files changed, 1142 insertions(+), 28 deletions(-)
 create mode 100644 api/v1alpha1/ipam_types.go
 create mode 100644 internal/service/ipam/ipam.go
 create mode 100644 internal/service/ipam/ipam_test.go

diff --git a/api/v1alpha1/ionoscloudmachine_types.go b/api/v1alpha1/ionoscloudmachine_types.go
index 1658306a..dffa2857 100644
--- a/api/v1alpha1/ionoscloudmachine_types.go
+++ b/api/v1alpha1/ionoscloudmachine_types.go
@@ -155,6 +155,9 @@ type IonosCloudMachineSpec struct {
 	//+optional
 	AdditionalNetworks Networks `json:"additionalNetworks,omitempty"`
 
+	// IPAMConfig allows obtaining IP addresses from existing IP pools instead of using DHCP.
+	IPAMConfig `json:",inline"`
+
 	// FailoverIP can be set to enable failover for VMs in the same MachineDeployment.
 	// It can be either set to an already reserved IPv4 address, or it can be set to "AUTO"
 	// which will automatically reserve an IPv4 address for the Failover Group.
@@ -183,6 +186,9 @@ type Network struct {
 	// This LAN will be excluded from the deletion process.
 	//+kubebuilder:validation:Minimum=1
 	NetworkID int32 `json:"networkID"`
+
+	// IPAMConfig allows obtaining IP addresses from existing IP pools instead of using DHCP.
+	IPAMConfig `json:",inline"`
 }
 
 // Volume is the physical storage on the VM.
@@ -228,7 +234,7 @@ type IonosCloudMachineStatus struct {
 	Ready bool `json:"ready"`
 
 	// MachineNetworkInfo contains information about the network configuration of the VM.
-	// This information is only available after the VM has been provisioned.
+	//+optional
 	MachineNetworkInfo *MachineNetworkInfo `json:"machineNetworkInfo,omitempty"`
 
 	// FailureReason will be set in the event that there is a terminal problem
@@ -280,6 +286,8 @@ type IonosCloudMachineStatus struct {
 }
 
 // MachineNetworkInfo contains information about the network configuration of the VM.
+// Before provisioning, MachineNetworkInfo may contain IP addresses that will be used for provisioning.
+// After provisioning, it contains the complete network configuration of the VM.
 type MachineNetworkInfo struct {
 	// NICInfo holds information about the NICs, which are attached to the VM.
 	//+optional
@@ -289,10 +297,16 @@ type MachineNetworkInfo struct {
 // NICInfo provides information about the NIC of the VM.
 type NICInfo struct {
 	// IPv4Addresses contains the IPv4 addresses of the NIC.
-	IPv4Addresses []string `json:"ipv4Addresses"`
+	// By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before
+	// creating the VM this can be temporarily empty, e.g. we use DHCP for IPv4 and fixed IP for IPv6.
+	//+optional
+	IPv4Addresses []string `json:"ipv4Addresses,omitempty"`
 
 	// IPv6Addresses contains the IPv6 addresses of the NIC.
- IPv6Addresses []string `json:"ipv6Addresses"` + // By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before + // creating the VM this can be temporarily empty, e.g. we use DHCP for IPv6 and fixed IP for IPv4. + //+optional + IPv6Addresses []string `json:"ipv6Addresses,omitempty"` // NetworkID is the ID of the LAN to which the NIC is connected. NetworkID int32 `json:"networkID"` diff --git a/api/v1alpha1/ionoscloudmachine_types_test.go b/api/v1alpha1/ionoscloudmachine_types_test.go index db38c274..57350ef4 100644 --- a/api/v1alpha1/ionoscloudmachine_types_test.go +++ b/api/v1alpha1/ionoscloudmachine_types_test.go @@ -64,6 +64,20 @@ func defaultMachine() *IonosCloudMachine { } } +func setInvalidPoolRef(m *IonosCloudMachine, poolType string, kind, apiGroup, name string) { + ref := &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(apiGroup), + Kind: kind, + Name: name, + } + switch poolType { + case "IPv6": + m.Spec.AdditionalNetworks[0].IPv6PoolRef = ref + case "IPv4": + m.Spec.AdditionalNetworks[0].IPv4PoolRef = ref + } +} + var _ = Describe("IonosCloudMachine Tests", func() { AfterEach(func() { m := &IonosCloudMachine{ @@ -337,6 +351,43 @@ var _ = Describe("IonosCloudMachine Tests", func() { m.Spec.AdditionalNetworks[0].NetworkID = -1 Expect(k8sClient.Create(context.Background(), m)).ToNot(Succeed()) }) + DescribeTable("should allow IPv4PoolRef.Kind GlobalInClusterIPPool and InClusterIPPool", func(kind string) { + m := defaultMachine() + m.Spec.AdditionalNetworks[0].IPv4PoolRef = &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: kind, + Name: "ipv4-pool", + } + Expect(k8sClient.Create(context.Background(), m)).To(Succeed()) + }, + Entry("GlobalInClusterIPPool", "GlobalInClusterIPPool"), + Entry("InClusterIPPool", "InClusterIPPool"), + ) + DescribeTable("should allow IPv6PoolRef.Kind GlobalInClusterIPPool and InClusterIPPool", func(kind string) { + m := defaultMachine() + m.Spec.AdditionalNetworks[0].IPv6PoolRef = &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: kind, + Name: "ipv6-pool", + } + Expect(k8sClient.Create(context.Background(), m)).To(Succeed()) + }, + Entry("GlobalInClusterIPPool", "GlobalInClusterIPPool"), + Entry("InClusterIPPool", "InClusterIPPool"), + ) + DescribeTable("must not allow invalid pool references", + func(poolType, kind, apiGroup, name string) { + m := defaultMachine() + setInvalidPoolRef(m, poolType, kind, apiGroup, name) + Expect(k8sClient.Create(context.Background(), m)).ToNot(Succeed()) + }, + Entry("invalid IPv6PoolRef with invalid kind", "IPv6", "SomeOtherIPPoolKind", "ipam.cluster.x-k8s.io", "ipv6-pool"), + Entry("invalid IPv6PoolRef with invalid apiGroup", "IPv6", "InClusterIPPool", "SomeWrongAPIGroup", "ipv6-pool"), + Entry("invalid IPv6PoolRef with empty name", "IPv6", "InClusterIPPool", "ipam.cluster.x-k8s.io", ""), + Entry("invalid IPv4PoolRef with invalid kind", "IPv4", "SomeOtherIPPoolKind", "ipam.cluster.x-k8s.io", "ipv4-pool"), + Entry("invalid IPv4PoolRef with invalid apiGroup", "IPv4", "InClusterIPPool", "SomeWrongAPIGroup", "ipv4-pool"), + Entry("invalid IPv4PoolRef with empty name", "IPv4", "InClusterIPPool", "ipam.cluster.x-k8s.io", ""), + ) }) }) Context("FailoverIP", func() { diff --git a/api/v1alpha1/ipam_types.go b/api/v1alpha1/ipam_types.go new file mode 100644 index 00000000..20f2cf0a --- /dev/null +++ b/api/v1alpha1/ipam_types.go @@ -0,0 +1,40 @@ +/* +Copyright 2024 IONOS Cloud. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// IPAMConfig contains the config for ip address management. +type IPAMConfig struct { + // IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + // The nic will use an available IP address from the referenced pool. + // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv4PoolRef.name is required" + // +optional + IPv4PoolRef *corev1.TypedLocalObjectReference `json:"ipv4PoolRef,omitempty"` + + // IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + // The nic will use an available IP address from the referenced pool. + // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv6PoolRef.name is required" + // +optional + IPv6PoolRef *corev1.TypedLocalObjectReference `json:"ipv6PoolRef,omitempty"` +} diff --git a/api/v1alpha1/suite_test.go b/api/v1alpha1/suite_test.go index 96f12c78..f90453bf 100644 --- a/api/v1alpha1/suite_test.go +++ b/api/v1alpha1/suite_test.go @@ -21,6 +21,7 @@ import ( "testing" "k8s.io/apimachinery/pkg/runtime" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -53,6 +54,7 @@ var _ = BeforeSuite(func() { scheme := runtime.NewScheme() Expect(AddToScheme(scheme)).To(Succeed()) + Expect(ipamv1.AddToScheme(scheme)).To(Succeed()) cfg, err := testEnv.Start() Expect(err).ToNot(HaveOccurred()) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 9aef4f4c..c7240835 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -21,11 +21,37 @@ limitations under the License. package v1alpha1 import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/errors" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { + *out = *in + if in.IPv4PoolRef != nil { + in, out := &in.IPv4PoolRef, &out.IPv4PoolRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.IPv6PoolRef != nil { + in, out := &in.IPv6PoolRef, &out.IPv6PoolRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig. +func (in *IPAMConfig) DeepCopy() *IPAMConfig { + if in == nil { + return nil + } + out := new(IPAMConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in @@ -231,8 +257,11 @@ func (in *IonosCloudMachineSpec) DeepCopyInto(out *IonosCloudMachineSpec) { if in.AdditionalNetworks != nil { in, out := &in.AdditionalNetworks, &out.AdditionalNetworks *out = make(Networks, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } + in.IPAMConfig.DeepCopyInto(&out.IPAMConfig) if in.FailoverIP != nil { in, out := &in.FailoverIP, &out.FailoverIP *out = new(string) @@ -433,6 +462,7 @@ func (in *NICInfo) DeepCopy() *NICInfo { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Network) DeepCopyInto(out *Network) { *out = *in + in.IPAMConfig.DeepCopyInto(&out.IPAMConfig) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. @@ -450,7 +480,9 @@ func (in Networks) DeepCopyInto(out *Networks) { { in := &in *out = make(Networks, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } diff --git a/cmd/main.go b/cmd/main.go index 3f1dcf51..ebbe8ab3 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -28,6 +28,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/util/flags" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -53,6 +54,8 @@ func init() { utilruntime.Must(clusterv1.AddToScheme(scheme)) utilruntime.Must(infrav1.AddToScheme(scheme)) + utilruntime.Must(ipamv1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml index 8be7386e..37adf624 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml @@ -70,6 +70,64 @@ spec: items: description: Network contains the config for additional LANs. properties: + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' networkID: description: |- NetworkID represents an ID an existing LAN in the data center. @@ -164,6 +222,64 @@ spec: rule: self == oldSelf - message: failoverIP must be either 'AUTO' or a valid IPv4 address rule: self == "AUTO" || self.matches("((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$") + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' memoryMB: default: 3072 description: |- @@ -318,9 +434,8 @@ spec: controller's output. type: string machineNetworkInfo: - description: |- - MachineNetworkInfo contains information about the network configuration of the VM. - This information is only available after the VM has been provisioned. + description: MachineNetworkInfo contains information about the network + configuration of the VM. properties: nicInfo: description: NICInfo holds information about the NICs, which are @@ -330,14 +445,18 @@ spec: VM. properties: ipv4Addresses: - description: IPv4Addresses contains the IPv4 addresses of - the NIC. + description: |- + IPv4Addresses contains the IPv4 addresses of the NIC. + By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before + creating the VM this can be temporarily empty, e.g. we use DHCP for IPv4 and fixed IP for IPv6. items: type: string type: array ipv6Addresses: - description: IPv6Addresses contains the IPv6 addresses of - the NIC. + description: |- + IPv6Addresses contains the IPv6 addresses of the NIC. + By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before + creating the VM this can be temporarily empty, e.g. we use DHCP for IPv6 and fixed IP for IPv4. items: type: string type: array @@ -351,8 +470,6 @@ spec: NIC of the VM. type: boolean required: - - ipv4Addresses - - ipv6Addresses - networkID - primary type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml index bcadd90a..c16f8585 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml @@ -80,6 +80,74 @@ spec: description: Network contains the config for additional LANs. properties: + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup + ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind + == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup + ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind + == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' networkID: description: |- NetworkID represents an ID an existing LAN in the data center. @@ -177,6 +245,64 @@ spec: - message: failoverIP must be either 'AUTO' or a valid IPv4 address rule: self == "AUTO" || self.matches("((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$") + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' memoryMB: default: 3072 description: |- diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 9e48d673..4173f90c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -106,3 +106,22 @@ rules: - get - patch - update +- apiGroups: + - ipam.cluster.x-k8s.io + resources: + - ipaddressclaims + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - ipam.cluster.x-k8s.io + resources: + - ipaddresses + verbs: + - get + - list + - watch diff --git a/internal/controller/ionoscloudmachine_controller.go b/internal/controller/ionoscloudmachine_controller.go index 9a85479c..75fc4e42 100644 --- a/internal/controller/ionoscloudmachine_controller.go +++ b/internal/controller/ionoscloudmachine_controller.go @@ -36,6 +36,7 @@ import ( infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/ipam" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/locker" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" ) @@ -62,6 +63,8 @@ func NewIonosCloudMachineReconciler(mgr ctrl.Manager) *IonosCloudMachineReconcil //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=ionoscloudmachines/finalizers,verbs=update //+kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch +//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch +//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;list;watch;create;update;delete //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;update //+kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch @@ -138,11 +141,11 @@ func (r *IonosCloudMachineReconciler) Reconcile( return r.reconcileDelete(ctx, machineScope, cloudService) } - return r.reconcileNormal(ctx, cloudService, machineScope) + return r.reconcileNormal(ctx, machineScope, cloudService) } func (r *IonosCloudMachineReconciler) reconcileNormal( - ctx context.Context, cloudService *cloud.Service, machineScope *scope.Machine, + ctx context.Context, machineScope *scope.Machine, cloudService *cloud.Service, ) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) log.V(4).Info("Reconciling IonosCloudMachine") @@ -178,8 +181,10 @@ func (r *IonosCloudMachineReconciler) reconcileNormal( return ctrl.Result{RequeueAfter: defaultReconcileDuration}, nil } + ipamHelper := ipam.NewHelper(r.Client, log) reconcileSequence := []serviceReconcileStep[scope.Machine]{ {"ReconcileLAN", cloudService.ReconcileLAN}, + {"ReconcileIPAddressClaims", ipamHelper.ReconcileIPAddresses}, {"ReconcileServer", cloudService.ReconcileServer}, {"ReconcileIPFailover", cloudService.ReconcileIPFailover}, 
{"FinalizeMachineProvisioning", cloudService.FinalizeMachineProvisioning}, diff --git a/internal/service/cloud/server.go b/internal/service/cloud/server.go index 6b789045..c38adb7b 100644 --- a/internal/service/cloud/server.go +++ b/internal/service/cloud/server.go @@ -404,27 +404,49 @@ func (s *Service) buildServerEntities(ms *scope.Machine, params serverEntityPara Items: &[]sdk.Volume{bootVolume}, } - // As we want to retrieve a public IP from the DHCP, we need to + primaryNIC := sdk.Nic{ + Properties: &sdk.NicProperties{ + Lan: ¶ms.lanID, + Name: ptr.To(s.nicName(ms.IonosMachine)), + }, + } + + if ms.IonosMachine.Status.MachineNetworkInfo != nil { + nicInfo := ms.IonosMachine.Status.MachineNetworkInfo.NICInfo[0] + primaryNIC.Properties.Ips = ptr.To(nicInfo.IPv4Addresses) + primaryNIC.Properties.Ipv6Ips = ptr.To(nicInfo.IPv6Addresses) + } + + primaryNIC.Properties.Dhcp = ptr.To(true) + + // In case we want to retrieve a public IP from the DHCP, we need to // create a NIC with empty IP addresses and patch the NIC afterward. + // To simplify the code we also follow this approach when using IP pools. serverNICs := sdk.Nics{ Items: &[]sdk.Nic{ - { - Properties: &sdk.NicProperties{ - Dhcp: ptr.To(true), - Lan: ¶ms.lanID, - Name: ptr.To(s.nicName(ms.IonosMachine)), - }, - }, + primaryNIC, }, } // Attach server to additional LANs if any. items := *serverNICs.Items - for _, nic := range ms.IonosMachine.Spec.AdditionalNetworks { - items = append(items, sdk.Nic{Properties: &sdk.NicProperties{ - Lan: &nic.NetworkID, - }}) + for i, nw := range ms.IonosMachine.Spec.AdditionalNetworks { + nic := sdk.Nic{ + Properties: &sdk.NicProperties{ + Lan: &nw.NetworkID, + }, + } + + if ms.IonosMachine.Status.MachineNetworkInfo != nil { + nicInfo := ms.IonosMachine.Status.MachineNetworkInfo.NICInfo[i+1] + nic.Properties.Ips = ptr.To(nicInfo.IPv4Addresses) + nic.Properties.Ipv6Ips = ptr.To(nicInfo.IPv6Addresses) + } + + nic.Properties.Dhcp = ptr.To(true) + + items = append(items, nic) } serverNICs.Items = &items diff --git a/internal/service/cloud/suite_test.go b/internal/service/cloud/suite_test.go index 20fa68fc..c99309fc 100644 --- a/internal/service/cloud/suite_test.go +++ b/internal/service/cloud/suite_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -171,6 +172,7 @@ func (s *ServiceTestSuite) SetupTest() { scheme := runtime.NewScheme() s.NoError(clusterv1.AddToScheme(scheme), "failed to extend scheme with Cluster API types") + s.NoError(ipamv1.AddToScheme(scheme), "failed to extend scheme with Cluster API ipam types") s.NoError(infrav1.AddToScheme(scheme), "failed to extend scheme with IonosCloud types") s.NoError(clientgoscheme.AddToScheme(scheme)) diff --git a/internal/service/ipam/ipam.go b/internal/service/ipam/ipam.go new file mode 100644 index 00000000..52645819 --- /dev/null +++ b/internal/service/ipam/ipam.go @@ -0,0 +1,276 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package ipam offers services for IP address management.
+package ipam
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/go-logr/logr"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1"
+	"github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope"
+)
+
+const (
+	// PrimaryNICFormat is the format used for IPAddressClaims for the primary NIC.
+	PrimaryNICFormat = "nic-%s"
+
+	// AdditionalNICFormat is the format used for IPAddressClaims for additional NICs.
+	AdditionalNICFormat = "nic-%s-%d"
+
+	// IPV4Format is the IPv4 format.
+	IPV4Format = "v4"
+
+	// IPV6Format is the IPv6 format.
+	IPV6Format = "v6"
+)
+
+// Helper offers IP address management services for IONOS Cloud machine reconciliation.
+type Helper struct {
+	logger logr.Logger
+	client client.Client
+}
+
+// NewHelper creates a new Helper.
+func NewHelper(c client.Client, log logr.Logger) *Helper {
+	h := new(Helper)
+	h.client = c
+	h.logger = log
+
+	return h
+}
+
+// ReconcileIPAddresses prevents successful reconciliation of an IonosCloudMachine
+// until an IPAM provider updates each IPAddressClaim associated with the
+// IonosCloudMachine with a reference to an IPAddress. The IPAddress is stored in the status.
+// This function is a no-op if the IonosCloudMachine has no associated IPAddressClaims.
+func (h *Helper) ReconcileIPAddresses(ctx context.Context, machineScope *scope.Machine) (requeue bool, err error) {
+	log := h.logger.WithName("reconcileIPAddresses")
+	log.V(4).Info("reconciling IPAddresses.")
+
+	networkInfos := &[]infrav1.NICInfo{}
+
+	waitForIP := false
+	// primary NIC.
+	waitForIP, err = h.handlePrimaryNIC(ctx, machineScope, networkInfos)
+	if err != nil {
+		return true, errors.Join(err, errors.New("unable to handle primary nic"))
+	}
+
+	if machineScope.IonosMachine.Spec.AdditionalNetworks != nil {
+		additionalWait, err := h.handleAdditionalNICs(ctx, machineScope, networkInfos)
+		if err != nil {
+			return true, errors.Join(err, errors.New("unable to handle additional nics"))
+		}
+		waitForIP = waitForIP || additionalWait
+	}
+
+	// update the status
+	log.V(4).Info("updating IonosMachine.status.machineNetworkInfo.")
+	machineScope.IonosMachine.Status.MachineNetworkInfo = &infrav1.MachineNetworkInfo{NICInfo: *networkInfos}
+
+	return waitForIP, nil
+}
+
+func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) {
+	nic := infrav1.NICInfo{Primary: true}
+	ipamConfig := machineScope.IonosMachine.Spec.IPAMConfig
+	nicName := fmt.Sprintf(PrimaryNICFormat, machineScope.IonosMachine.Name)
+
+	// default NIC IPv4.
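+	// If an IPv4 pool is referenced, resolve the corresponding IPAddressClaim (it is created on the
+	// first pass); an empty result means no IPAddress has been allocated yet and the caller must requeue.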
+	if ipamConfig.IPv4PoolRef != nil {
+		ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV4Format, ipamConfig.IPv4PoolRef)
+		if err != nil {
+			return false, err
+		}
+		if ip == "" {
+			waitForIP = true
+		} else {
+			nic.IPv4Addresses = []string{ip}
+		}
+	}
+
+	// default NIC IPv6.
+	if ipamConfig.IPv6PoolRef != nil {
+		ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV6Format, ipamConfig.IPv6PoolRef)
+		if err != nil {
+			return false, err
+		}
+		if ip == "" {
+			waitForIP = true
+		} else {
+			nic.IPv6Addresses = []string{ip}
+		}
+	}
+
+	*nics = append(*nics, nic)
+
+	return waitForIP, nil
+}
+
+func (h *Helper) handleAdditionalNICs(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) {
+	// additional NICs.
+	for _, net := range machineScope.IonosMachine.Spec.AdditionalNetworks {
+		nic := infrav1.NICInfo{Primary: false}
+		nicName := fmt.Sprintf(AdditionalNICFormat, machineScope.IonosMachine.Name, net.NetworkID)
+		if net.IPv4PoolRef != nil {
+			ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV4Format, net.IPv4PoolRef)
+			if err != nil {
+				return false, errors.Join(err, fmt.Errorf("unable to handle IPv4Address for nic %s", nicName))
+			}
+			if ip == "" {
+				waitForIP = true
+			} else {
+				nic.IPv4Addresses = []string{ip}
+			}
+		}
+
+		if net.IPv6PoolRef != nil {
+			ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV6Format, net.IPv6PoolRef)
+			if err != nil {
+				return false, errors.Join(err, fmt.Errorf("unable to handle IPv6Address for nic %s", nicName))
+			}
+			if ip == "" {
+				waitForIP = true
+			} else {
+				nic.IPv6Addresses = []string{ip}
+			}
+		}
+
+		*nics = append(*nics, nic)
+	}
+
+	return waitForIP, nil
+}
+
+// handleIPAddressForNIC checks for an IPAddressClaim. If one exists, it extracts the IP from the
+// corresponding IPAddress object; otherwise it creates the IPAddressClaim and returns early.
+func (h *Helper) handleIPAddressForNIC(ctx context.Context, machineScope *scope.Machine, nic, format string, poolRef *corev1.TypedLocalObjectReference) (ip string, err error) {
+	log := h.logger.WithName("handleIPAddressForNIC")
+
+	suffix := "ipv4"
+	if format == IPV6Format {
+		suffix = "ipv6"
+	}
+	key := client.ObjectKey{
+		Namespace: machineScope.IonosMachine.Namespace,
+		Name:      fmt.Sprintf("%s-%s", nic, suffix),
+	}
+
+	claim, err := h.GetIPAddressClaim(ctx, key)
+	if err != nil {
+		if !apierrors.IsNotFound(err) {
+			return "", err
+		}
+		log.V(4).Info("IPAddressClaim not found, creating it.", "nic", nic)
+		// IPAddressClaim not yet created.
+		err = h.CreateIPAddressClaim(ctx, machineScope.IonosMachine, key.Name, poolRef)
+		if err != nil {
+			return "", errors.Join(err, fmt.Errorf("unable to create IPAddressClaim for machine %s", machineScope.IonosMachine.Name))
+		}
+		// we just created the claim, so we can return early and wait for the creation of the IPAddress.
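+		// An external IPAM provider (for example the Cluster API in-cluster IPAM provider, which serves
+		// the InClusterIPPool and GlobalInClusterIPPool kinds allowed by the API validation) is expected
+		// to fulfil the claim by creating an IPAddress; a later reconciliation then reads it below.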
+		return "", nil
+	}
+
+	// we found a claim, let's check whether there is an IPAddress already
+	ipAddrName := claim.Status.AddressRef.Name
+	if ipAddrName == "" {
+		log.V(4).Info("No IPAddress found yet.", "nic", nic)
+		return "", nil
+	}
+
+	ipAddrKey := types.NamespacedName{
+		Namespace: machineScope.IonosMachine.Namespace,
+		Name:      ipAddrName,
+	}
+	ipAddr, err := h.GetIPAddress(ctx, ipAddrKey)
+	if err != nil {
+		return "", errors.Join(err, fmt.Errorf("unable to get IPAddress specified in claim %s", claim.Name))
+	}
+
+	ip = ipAddr.Spec.Address
+
+	log.V(4).Info("IPAddress found.", "ip", ip, "nic", nic)
+
+	return ip, nil
+}
+
+// CreateIPAddressClaim creates an IPAddressClaim for a given object.
+func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, name string, poolRef *corev1.TypedLocalObjectReference) error {
+	claimRef := types.NamespacedName{
+		Namespace: owner.GetNamespace(),
+		Name:      name,
+	}
+
+	ipAddrClaim := &ipamv1.IPAddressClaim{}
+	var err error
+	if err = h.client.Get(ctx, claimRef, ipAddrClaim); err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+
+	if !apierrors.IsNotFound(err) {
+		// IPAddressClaim already exists
+		return nil
+	}
+
+	desired := &ipamv1.IPAddressClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      claimRef.Name,
+			Namespace: claimRef.Namespace,
+		},
+		Spec: ipamv1.IPAddressClaimSpec{
+			PoolRef: *poolRef,
+		},
+	}
+	_, err = controllerutil.CreateOrUpdate(ctx, h.client, desired, func() error {
+		// make the owner object the controller of the claim
+		return controllerutil.SetControllerReference(owner, desired, h.client.Scheme())
+	})
+
+	return err
+}
+
+// GetIPAddress attempts to retrieve the IPAddress.
+func (h *Helper) GetIPAddress(ctx context.Context, key client.ObjectKey) (*ipamv1.IPAddress, error) {
+	out := &ipamv1.IPAddress{}
+	err := h.client.Get(ctx, key, out)
+	if err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+// GetIPAddressClaim attempts to retrieve the IPAddressClaim.
+func (h *Helper) GetIPAddressClaim(ctx context.Context, key client.ObjectKey) (*ipamv1.IPAddressClaim, error) {
+	out := &ipamv1.IPAddressClaim{}
+	err := h.client.Get(ctx, key, out)
+	if err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
diff --git a/internal/service/ipam/ipam_test.go b/internal/service/ipam/ipam_test.go
new file mode 100644
index 00000000..9464ea9f
--- /dev/null
+++ b/internal/service/ipam/ipam_test.go
@@ -0,0 +1,405 @@
+/*
+Copyright 2024 IONOS Cloud.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package ipam + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/ionoscloud/clienttest" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/locker" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/ptr" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" +) + +type IpamTestSuite struct { + *require.Assertions + suite.Suite + k8sClient client.Client + ctx context.Context + machineScope *scope.Machine + clusterScope *scope.Cluster + log logr.Logger + service *cloud.Service + ipamHelper *Helper + capiCluster *clusterv1.Cluster + capiMachine *clusterv1.Machine + infraCluster *infrav1.IonosCloudCluster + infraMachine *infrav1.IonosCloudMachine + ionosClient *clienttest.MockClient +} + +func (s *IpamTestSuite) SetupSuite() { + s.log = logr.Discard() + s.ctx = context.Background() + s.Assertions = s.Require() +} + +func (s *IpamTestSuite) SetupTest() { + var err error + s.ionosClient = clienttest.NewMockClient(s.T()) + + s.capiCluster = &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-cluster", + UID: "uid", + }, + Spec: clusterv1.ClusterSpec{}, + } + s.infraCluster = &infrav1.IonosCloudCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: s.capiCluster.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + }, + }, + Spec: infrav1.IonosCloudClusterSpec{ + Location: "de/txl", + }, + Status: infrav1.IonosCloudClusterStatus{}, + } + s.capiMachine = &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-machine", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: s.capiCluster.Name, + Version: ptr.To("v1.26.12"), + ProviderID: ptr.To("ionos://dd426c63-cd1d-4c02-aca3-13b4a27c2ebf"), + }, + } + s.infraMachine = &infrav1.IonosCloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-machine", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + clusterv1.MachineDeploymentNameLabel: "test-md", + }, + }, + Spec: infrav1.IonosCloudMachineSpec{ + ProviderID: ptr.To("ionos://dd426c63-cd1d-4c02-aca3-13b4a27c2ebf"), + DatacenterID: "ccf27092-34e8-499e-a2f5-2bdee9d34a12", + NumCores: 2, + AvailabilityZone: infrav1.AvailabilityZoneAuto, + MemoryMB: 4096, + CPUFamily: ptr.To("AMD_OPTERON"), + Disk: &infrav1.Volume{ + Name: "test-machine-hdd", + DiskType: infrav1.VolumeDiskTypeHDD, + SizeGB: 20, + AvailabilityZone: infrav1.AvailabilityZoneAuto, + Image: &infrav1.ImageSpec{ + ID: "3e3e3e3e-3e3e-3e3e-3e3e-3e3e3e3e3e3e", + }, + }, + Type: infrav1.ServerTypeEnterprise, + }, + Status: infrav1.IonosCloudMachineStatus{}, + } + + scheme := runtime.NewScheme() + 
s.NoError(clusterv1.AddToScheme(scheme), "failed to extend scheme with Cluster API types") + s.NoError(ipamv1.AddToScheme(scheme), "failed to extend scheme with Cluster API ipam types") + s.NoError(infrav1.AddToScheme(scheme), "failed to extend scheme with IonosCloud types") + s.NoError(clientgoscheme.AddToScheme(scheme)) + + initObjects := []client.Object{s.infraMachine, s.infraCluster, s.capiCluster, s.capiMachine} + s.k8sClient = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(initObjects...). + WithStatusSubresource(initObjects...). + Build() + + s.ipamHelper = NewHelper(s.k8sClient, s.log) + s.clusterScope, err = scope.NewCluster(scope.ClusterParams{ + Client: s.k8sClient, + Cluster: s.capiCluster, + IonosCluster: s.infraCluster, + Locker: locker.New(), + }) + s.NoError(err, "failed to create cluster scope") + + s.machineScope, err = scope.NewMachine(scope.MachineParams{ + Client: s.k8sClient, + Machine: s.capiMachine, + ClusterScope: s.clusterScope, + IonosMachine: s.infraMachine, + Locker: locker.New(), + }) + s.NoError(err, "failed to create machine scope") + + s.service, err = cloud.NewService(s.ionosClient, s.log) + s.NoError(err, "failed to create service") +} + +func TestIpamTestSuite(t *testing.T) { + suite.Run(t, new(IpamTestSuite)) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesDontCreateClaim() { + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + + // No PoolRefs provided, so the Reconcile must not create a claim. + list := &ipamv1.IPAddressClaimList{} + err = s.k8sClient.List(s.ctx, list) + s.Empty(list.Items) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv4CreateClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + s.machineScope.IonosMachine.Spec.IPv4PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. + s.True(requeue) + s.NoError(err) + + claim := defaultPrimaryIPv4Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv6CreateClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + s.machineScope.IonosMachine.Spec.IPv6PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. 
+ s.True(requeue) + s.NoError(err) + + claim := defaultPrimaryIPv6Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv4GetIPFromClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + claim := defaultPrimaryIPv4Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv4-10-0-0-2" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv4Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.IPv4PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("10.0.0.2", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[0].IPv4Addresses[0]) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv6GetIPFromClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + claim := defaultPrimaryIPv6Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv6-2001-db8--" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv6Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.IPv6PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("2001:db8::", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[0].IPv6Addresses[0]) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv4CreateClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv4(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. + s.True(requeue) + s.NoError(err) + + claim := defaultAdditionalIPv4Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv6CreateClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv6(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. 
+ s.True(requeue) + s.NoError(err) + + claim := defaultAdditionalIPv6Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv6GetIPFromClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + claim := defaultAdditionalIPv6Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv6-2001-db8--" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv6Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv6(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("2001:db8::", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[1].IPv6Addresses[0]) +} + +func defaultInClusterIPv4PoolRef() *corev1.TypedLocalObjectReference { + return &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: "incluster-ipv4-pool", + } +} + +func defaultInClusterIPv6PoolRef() *corev1.TypedLocalObjectReference { + return &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: "incluster-ipv6-pool", + } +} + +func defaultIPv4Address(claim *ipamv1.IPAddressClaim, poolRef *corev1.TypedLocalObjectReference) *ipamv1.IPAddress { + return &ipamv1.IPAddress{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv4-10-0-0-2", + Namespace: "default", + }, + Spec: ipamv1.IPAddressSpec{ + ClaimRef: *localRef(claim), + PoolRef: *poolRef, + Address: "10.0.0.2", + Prefix: 16, + }, + } +} + +func defaultIPv6Address(claim *ipamv1.IPAddressClaim, poolRef *corev1.TypedLocalObjectReference) *ipamv1.IPAddress { + return &ipamv1.IPAddress{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv6-2001-db8--", + Namespace: "default", + }, + Spec: ipamv1.IPAddressSpec{ + ClaimRef: *localRef(claim), + PoolRef: *poolRef, + Address: "2001:db8::", + Prefix: 42, + }, + } +} + +func defaultPrimaryIPv4Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv4", + Namespace: "default", + }, + } +} + +func defaultAdditionalIPv4Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-1-ipv4", + Namespace: "default", + }, + } +} + +func defaultAdditionalIPv6Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-1-ipv6", + Namespace: "default", + }, + } +} + +func defaultAdditionalNetworksIpv6(poolRef *corev1.TypedLocalObjectReference) []infrav1.Network { + return []infrav1.Network{{ + NetworkID: 1, + IPAMConfig: infrav1.IPAMConfig{ + IPv6PoolRef: poolRef, + }, + }} +} + +func defaultAdditionalNetworksIpv4(poolRef *corev1.TypedLocalObjectReference) []infrav1.Network { + return []infrav1.Network{{ + NetworkID: 1, + IPAMConfig: infrav1.IPAMConfig{ + IPv4PoolRef: poolRef, + }, + }} +} + +func defaultPrimaryIPv6Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv6", + Namespace: "default", + }, + } +} + +func localRef(obj client.Object) *corev1.LocalObjectReference { + return &corev1.LocalObjectReference{ + Name: obj.GetName(), + } +} From 
dccebb75cc342870e0bf439e6edca5f2f3c2b3f0 Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Tue, 2 Jul 2024 12:25:37 +0200 Subject: [PATCH 02/13] Fix some review comments --- api/v1alpha1/ionoscloudmachine_types.go | 2 + ...e.cluster.x-k8s.io_ionoscloudmachines.yaml | 486 ------------------ ...r.x-k8s.io_ionoscloudmachinetemplates.yaml | 3 + internal/service/ipam/ipam.go | 29 +- 4 files changed, 16 insertions(+), 504 deletions(-) delete mode 100644 config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml diff --git a/api/v1alpha1/ionoscloudmachine_types.go b/api/v1alpha1/ionoscloudmachine_types.go index dffa2857..a7041f3f 100644 --- a/api/v1alpha1/ionoscloudmachine_types.go +++ b/api/v1alpha1/ionoscloudmachine_types.go @@ -178,6 +178,8 @@ type IonosCloudMachineSpec struct { // Networks contains a list of additional LAN IDs // that should be attached to the VM. +// +listType=map +// +listMapKey=networkID type Networks []Network // Network contains the config for additional LANs. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml deleted file mode 100644 index 37adf624..00000000 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml +++ /dev/null @@ -1,486 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.14.0 - name: ionoscloudmachines.infrastructure.cluster.x-k8s.io -spec: - group: infrastructure.cluster.x-k8s.io - names: - categories: - - cluster-api - - ionoscloud - kind: IonosCloudMachine - listKind: IonosCloudMachineList - plural: ionoscloudmachines - shortNames: - - icm - singular: ionoscloudmachine - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Cluster - jsonPath: .metadata.labels['cluster\.x-k8s\.io/cluster-name'] - name: Cluster - type: string - - description: Machine is ready - jsonPath: .status.ready - name: Ready - type: string - - jsonPath: .status.machineNetworkInfo.nicInfo[*].ipv4Addresses - name: IPv4 Addresses - type: string - - jsonPath: .status.machineNetworkInfo.nicInfo[*].networkID - name: Machine Connected Networks - type: string - - jsonPath: .status.machineNetworkInfo.nicInfo[*].ipv6Addresses - name: IPv6 Addresses - priority: 1 - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: IonosCloudMachine is the Schema for the ionoscloudmachines API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IonosCloudMachineSpec defines the desired state of IonosCloudMachine. - properties: - additionalNetworks: - description: |- - AdditionalNetworks defines the additional network configurations for the VM. - NOTE(lubedacht): We currently only support networks with DHCP enabled. 
- items: - description: Network contains the config for additional LANs. - properties: - ipv4PoolRef: - description: |- - IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. - The nic will use an available IP address from the referenced pool. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io - rule: self.apiGroup == 'ipam.cluster.x-k8s.io' - - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool - rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' - - message: ipv4PoolRef.name is required - rule: self.name != '' - ipv6PoolRef: - description: |- - IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. - The nic will use an available IP address from the referenced pool. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io - rule: self.apiGroup == 'ipam.cluster.x-k8s.io' - - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool - rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' - - message: ipv6PoolRef.name is required - rule: self.name != '' - networkID: - description: |- - NetworkID represents an ID an existing LAN in the data center. - This LAN will be excluded from the deletion process. - format: int32 - minimum: 1 - type: integer - required: - - networkID - type: object - type: array - availabilityZone: - default: AUTO - description: AvailabilityZone is the availability zone in which the - VM should be provisioned. - enum: - - AUTO - - ZONE_1 - - ZONE_2 - type: string - cpuFamily: - description: |- - CPUFamily defines the CPU architecture, which will be used for this VM. - Not all CPU architectures are available in all data centers. - - - If not specified, the cloud will select a suitable CPU family - based on the availability in the data center. - example: AMD_OPTERON - type: string - datacenterID: - description: DatacenterID is the ID of the data center where the VM - should be created in. - format: uuid - type: string - x-kubernetes-validations: - - message: datacenterID is immutable - rule: self == oldSelf - disk: - description: Disk defines the boot volume of the VM. - properties: - availabilityZone: - default: AUTO - description: AvailabilityZone is the availability zone where the - volume will be created. 
- enum: - - AUTO - - ZONE_1 - - ZONE_2 - - ZONE_3 - type: string - diskType: - default: HDD - description: DiskType defines the type of the hard drive. - enum: - - HDD - - SSD Standard - - SSD Premium - type: string - image: - description: Image is the image to use for the VM. - properties: - id: - description: ID is the ID of the image to use for the VM. - minLength: 1 - type: string - required: - - id - type: object - name: - description: Name is the name of the volume - type: string - sizeGB: - default: 20 - description: SizeGB defines the size of the volume in GB - minimum: 10 - type: integer - required: - - image - type: object - failoverIP: - description: |- - FailoverIP can be set to enable failover for VMs in the same MachineDeployment. - It can be either set to an already reserved IPv4 address, or it can be set to "AUTO" - which will automatically reserve an IPv4 address for the Failover Group. - - - If the machine is a control plane machine, this field will not be taken into account. - type: string - x-kubernetes-validations: - - message: failoverIP is immutable - rule: self == oldSelf - - message: failoverIP must be either 'AUTO' or a valid IPv4 address - rule: self == "AUTO" || self.matches("((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$") - ipv4PoolRef: - description: |- - IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. - The nic will use an available IP address from the referenced pool. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io - rule: self.apiGroup == 'ipam.cluster.x-k8s.io' - - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool - rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' - - message: ipv4PoolRef.name is required - rule: self.name != '' - ipv6PoolRef: - description: |- - IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. - The nic will use an available IP address from the referenced pool. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io - rule: self.apiGroup == 'ipam.cluster.x-k8s.io' - - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool - rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' - - message: ipv6PoolRef.name is required - rule: self.name != '' - memoryMB: - default: 3072 - description: |- - MemoryMB is the memory size for the VM in MB. 
- Size must be specified in multiples of 256 MB with a minimum of 1024 MB - which is required as we are using hot-pluggable RAM by default. - format: int32 - minimum: 2048 - multipleOf: 1024 - type: integer - numCores: - default: 1 - description: NumCores defines the number of cores for the VM. - format: int32 - minimum: 1 - type: integer - providerID: - description: |- - ProviderID is the IONOS Cloud provider ID - will be in the format ionos://ee090ff2-1eef-48ec-a246-a51a33aa4f3a - type: string - type: - default: ENTERPRISE - description: Type is the server type of the VM. Can be either ENTERPRISE - or VCPU. - enum: - - ENTERPRISE - - VCPU - type: string - x-kubernetes-validations: - - message: type is immutable - rule: self == oldSelf - required: - - datacenterID - - disk - type: object - x-kubernetes-validations: - - message: cpuFamily must not be specified when using VCPU - rule: self.type != 'VCPU' || !has(self.cpuFamily) - status: - description: IonosCloudMachineStatus defines the observed state of IonosCloudMachine. - properties: - conditions: - description: Conditions defines current service state of the IonosCloudMachine. - items: - description: Condition defines an observation of a Cluster API resource - operational state. - properties: - lastTransitionTime: - description: |- - Last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - A human readable message indicating details about the transition. - This field may be empty. - type: string - reason: - description: |- - The reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may not be empty. - type: string - severity: - description: |- - Severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: |- - Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - currentRequest: - description: |- - CurrentRequest shows the current provisioning request for any - cloud resource that is being provisioned. - properties: - method: - description: Method is the request method - type: string - requestPath: - description: RequestPath is the sub path for the request URL - type: string - state: - description: RequestStatus is the status of the request in the - queue. - enum: - - QUEUED - - RUNNING - - DONE - - FAILED - type: string - required: - - method - - requestPath - type: object - failureMessage: - description: |- - FailureMessage will be set in the event that there is a terminal problem - reconciling the Machine and will contain a more verbose string suitable - for logging and human consumption. 
- - - This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the Machine's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - - Any transient errors that occur during the reconciliation of IonosCloudMachines - can be added as events to the IonosCloudMachine object and/or logged in the - controller's output. - type: string - failureReason: - description: |- - FailureReason will be set in the event that there is a terminal problem - reconciling the Machine and will contain a succinct value suitable - for machine interpretation. - - - This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the Machine's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - - Any transient errors that occur during the reconciliation of IonosCloudMachines - can be added as events to the IonosCloudMachine object and/or logged in the - controller's output. - type: string - machineNetworkInfo: - description: MachineNetworkInfo contains information about the network - configuration of the VM. - properties: - nicInfo: - description: NICInfo holds information about the NICs, which are - attached to the VM. - items: - description: NICInfo provides information about the NIC of the - VM. - properties: - ipv4Addresses: - description: |- - IPv4Addresses contains the IPv4 addresses of the NIC. - By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before - creating the VM this can be temporarily empty, e.g. we use DHCP for IPv4 and fixed IP for IPv6. - items: - type: string - type: array - ipv6Addresses: - description: |- - IPv6Addresses contains the IPv6 addresses of the NIC. - By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before - creating the VM this can be temporarily empty, e.g. we use DHCP for IPv6 and fixed IP for IPv4. - items: - type: string - type: array - networkID: - description: NetworkID is the ID of the LAN to which the - NIC is connected. - format: int32 - type: integer - primary: - description: Primary indicates whether the NIC is the primary - NIC of the VM. - type: boolean - required: - - networkID - - primary - type: object - type: array - type: object - ready: - description: Ready indicates the VM has been provisioned and is ready. 
- type: boolean - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml index c16f8585..c7bd65ab 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml @@ -159,6 +159,9 @@ spec: - networkID type: object type: array + x-kubernetes-list-map-keys: + - networkID + x-kubernetes-list-type: map availabilityZone: default: AUTO description: AvailabilityZone is the availability zone in diff --git a/internal/service/ipam/ipam.go b/internal/service/ipam/ipam.go index 52645819..71c36a47 100644 --- a/internal/service/ipam/ipam.go +++ b/internal/service/ipam/ipam.go @@ -43,13 +43,13 @@ const ( AdditionalNICFormat = "nic-%s-%d" // IPV4Format is the IP v4 format. - IPV4Format = "v4" + IPV4Format = "ipv4" // IPV6Format is the IP v6 format. - IPV6Format = "v6" + IPV6Format = "ipv6" ) -// Helper offers ip address management services for IONOS Cloud machine reconciliation. +// Helper offers IP address management services for IONOS Cloud machine reconciliation. type Helper struct { logger logr.Logger client client.Client @@ -74,26 +74,25 @@ func (h *Helper) ReconcileIPAddresses(ctx context.Context, machineScope *scope.M networkInfos := &[]infrav1.NICInfo{} - waiForIP := false - // primary nic. - waiForIP, err = h.handlePrimaryNIC(ctx, machineScope, networkInfos) + // primary NIC. + requeue, err = h.handlePrimaryNIC(ctx, machineScope, networkInfos) if err != nil { return true, errors.Join(err, errors.New("unable to handle primary nic")) } if machineScope.IonosMachine.Spec.AdditionalNetworks != nil { - additionalWait, err := h.handleAdditionalNICs(ctx, machineScope, networkInfos) + waitForAdditionalIP, err := h.handleAdditionalNICs(ctx, machineScope, networkInfos) if err != nil { return true, errors.Join(err, errors.New("unable to handle additional nics")) } - waiForIP = waiForIP || additionalWait + requeue = requeue || waitForAdditionalIP } // update the status log.V(4).Info("updating IonosMachine.status.machineNetworkInfo.") machineScope.IonosMachine.Status.MachineNetworkInfo = &infrav1.MachineNetworkInfo{NICInfo: *networkInfos} - return waiForIP, nil + return requeue, nil } func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) { @@ -101,7 +100,7 @@ func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machi ipamConfig := machineScope.IonosMachine.Spec.IPAMConfig nicName := fmt.Sprintf(PrimaryNICFormat, machineScope.IonosMachine.Name) - // default nic ipv4. + // default NIC ipv4. if ipamConfig.IPv4PoolRef != nil { ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV4Format, ipamConfig.IPv4PoolRef) if err != nil { @@ -114,7 +113,7 @@ func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machi } } - // default nic ipv6. + // default NIC ipv6. 
if ipamConfig.IPv6PoolRef != nil { ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV6Format, ipamConfig.IPv6PoolRef) if err != nil { @@ -133,7 +132,6 @@ func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machi } func (h *Helper) handleAdditionalNICs(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) { - // additional nics. for _, net := range machineScope.IonosMachine.Spec.AdditionalNetworks { nic := infrav1.NICInfo{Primary: false} nicName := fmt.Sprintf(AdditionalNICFormat, machineScope.IonosMachine.Name, net.NetworkID) @@ -168,13 +166,9 @@ func (h *Helper) handleAdditionalNICs(ctx context.Context, machineScope *scope.M } // handleIPAddressForNIC checks for an IPAddressClaim. If there is one it extracts the ip from the corresponding IPAddress object, otherwise it creates the IPAddressClaim and returns early. -func (h *Helper) handleIPAddressForNIC(ctx context.Context, machineScope *scope.Machine, nic, format string, poolRef *corev1.TypedLocalObjectReference) (ip string, err error) { +func (h *Helper) handleIPAddressForNIC(ctx context.Context, machineScope *scope.Machine, nic, suffix string, poolRef *corev1.TypedLocalObjectReference) (ip string, err error) { log := h.logger.WithName("handleIPAddressForNIC") - suffix := "ipv4" - if format == IPV6Format { - suffix = "ipv6" - } key := client.ObjectKey{ Namespace: machineScope.IonosMachine.Namespace, Name: fmt.Sprintf("%s-%s", nic, suffix), @@ -186,7 +180,6 @@ func (h *Helper) handleIPAddressForNIC(ctx context.Context, machineScope *scope. return "", err } log.V(4).Info("IPAddressClaim not found, creating it.", "nic", nic) - // IPAddressClaim not yet created. err = h.CreateIPAddressClaim(ctx, machineScope.IonosMachine, key.Name, poolRef) if err != nil { return "", errors.Join(err, fmt.Errorf("unable to create IPAddressClaim for machine %s", machineScope.IonosMachine.Name)) From 74078207417a1f25f32639c904204506cac0770d Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Tue, 2 Jul 2024 15:05:21 +0200 Subject: [PATCH 03/13] Fix some review comments --- .../ionoscloudmachine_controller.go | 2 + internal/service/ipam/ipam.go | 42 ++++++++++++++++++- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/internal/controller/ionoscloudmachine_controller.go b/internal/controller/ionoscloudmachine_controller.go index 75fc4e42..a1e81ad9 100644 --- a/internal/controller/ionoscloudmachine_controller.go +++ b/internal/controller/ionoscloudmachine_controller.go @@ -223,6 +223,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( return ctrl.Result{RequeueAfter: defaultReconcileDuration}, nil } + ipamHelper := ipam.NewHelper(r.Client, log) reconcileSequence := []serviceReconcileStep[scope.Machine]{ // NOTE(avorima): NICs, which are configured in an IP failover configuration, cannot be deleted // by a request to delete the server. 
Therefore, during deletion, we need to remove the NIC from @@ -231,6 +232,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( {"ReconcileServerDeletion", cloudService.ReconcileServerDeletion}, {"ReconcileLANDeletion", cloudService.ReconcileLANDeletion}, {"ReconcileFailoverIPBlockDeletion", cloudService.ReconcileFailoverIPBlockDeletion}, + {"ReconcileIPAddressClaimsDeletion", ipamHelper.ReconcileIPAddresses}, } for _, step := range reconcileSequence { diff --git a/internal/service/ipam/ipam.go b/internal/service/ipam/ipam.go index 71c36a47..9659a662 100644 --- a/internal/service/ipam/ipam.go +++ b/internal/service/ipam/ipam.go @@ -95,6 +95,44 @@ func (h *Helper) ReconcileIPAddresses(ctx context.Context, machineScope *scope.M return requeue, nil } +func (h *Helper) ReconcileIPAddressClaimsDeletion(ctx context.Context, machineScope *scope.Machine) (err error) { + log := h.logger.WithName("reconcileIPAddressClaimsDeletion") + log.V(4).Info("removing finalizers from IPAddressClaims.") + + formats := []string{IPV4Format, IPV6Format} + nicNames := []string{fmt.Sprintf(PrimaryNICFormat, machineScope.IonosMachine.Name)} + + for _, network := range machineScope.IonosMachine.Spec.AdditionalNetworks { + nicName := fmt.Sprintf(AdditionalNICFormat, machineScope.IonosMachine.Name, network.NetworkID) + nicNames = append(nicNames, nicName) + } + + for _, format := range formats { + for _, nicName := range nicNames { + key := client.ObjectKey{ + Namespace: machineScope.IonosMachine.Namespace, + Name: fmt.Sprintf("%s-%s", nicName, format), + } + + claim, err := h.GetIPAddressClaim(ctx, key) + if err != nil { + if apierrors.IsNotFound(err) { + continue + } + return err + } + + if updated := controllerutil.RemoveFinalizer(claim, infrav1.MachineFinalizer); updated { + if err = h.client.Update(ctx, claim); err != nil { + return err + } + } + } + } + + return nil +} + func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) { nic := infrav1.NICInfo{Primary: true} ipamConfig := machineScope.IonosMachine.Spec.IPAMConfig @@ -143,7 +181,7 @@ func (h *Helper) handleAdditionalNICs(ctx context.Context, machineScope *scope.M if ip == "" { waitForIP = true } else { - nic.IPv6Addresses = []string{ip} + nic.IPv4Addresses = []string{ip} } } @@ -239,7 +277,7 @@ func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, }, } _, err = controllerutil.CreateOrUpdate(ctx, h.client, desired, func() error { - // set the owner reference to the cluster + controllerutil.AddFinalizer(desired, infrav1.MachineFinalizer) return controllerutil.SetControllerReference(owner, desired, h.client.Scheme()) }) From d229aacc88f03b5f96ce98f7862f1b2a768c30ca Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Wed, 24 Jul 2024 10:34:45 +0200 Subject: [PATCH 04/13] Review rework + documentation on ipam usage --- api/v1alpha1/ipam_types.go | 40 --------------- api/v1alpha1/types.go | 21 ++++++++ ...r.x-k8s.io_ionoscloudmachinetemplates.yaml | 8 +-- docs/ipam.md | 47 +++++++++++++++++ .../ionoscloudmachine_controller.go | 10 ++-- internal/service/{ipam => k8s}/ipam.go | 51 ++++++++++--------- internal/service/{ipam => k8s}/ipam_test.go | 2 +- 7 files changed, 104 insertions(+), 75 deletions(-) delete mode 100644 api/v1alpha1/ipam_types.go create mode 100644 docs/ipam.md rename internal/service/{ipam => k8s}/ipam.go (88%) rename internal/service/{ipam => k8s}/ipam_test.go (99%) diff --git a/api/v1alpha1/ipam_types.go b/api/v1alpha1/ipam_types.go 
deleted file mode 100644 index 20f2cf0a..00000000 --- a/api/v1alpha1/ipam_types.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2024 IONOS Cloud. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" -) - -// IPAMConfig contains the config for ip address management. -type IPAMConfig struct { - // IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. - // The nic will use an available IP address from the referenced pool. - // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" - // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" - // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv4PoolRef.name is required" - // +optional - IPv4PoolRef *corev1.TypedLocalObjectReference `json:"ipv4PoolRef,omitempty"` - - // IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. - // The nic will use an available IP address from the referenced pool. - // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" - // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" - // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv6PoolRef.name is required" - // +optional - IPv6PoolRef *corev1.TypedLocalObjectReference `json:"ipv6PoolRef,omitempty"` -} diff --git a/api/v1alpha1/types.go b/api/v1alpha1/types.go index 4fd6c6b5..9eafdceb 100644 --- a/api/v1alpha1/types.go +++ b/api/v1alpha1/types.go @@ -16,6 +16,8 @@ limitations under the License. package v1alpha1 +import corev1 "k8s.io/api/core/v1" + // ProvisioningRequest is a definition of a provisioning request // in the IONOS Cloud. type ProvisioningRequest struct { @@ -30,3 +32,22 @@ type ProvisioningRequest struct { //+optional State string `json:"state,omitempty"` } + +// IPAMConfig optionally defines which IP Pools to use. +type IPAMConfig struct { + // IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + // The NIC will use an available IP address from the referenced pool. 
+ // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv4PoolRef.name is required" + // +optional + IPv4PoolRef *corev1.TypedLocalObjectReference `json:"ipv4PoolRef,omitempty"` + + // IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + // The NIC will use an available IP address from the referenced pool. + // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv6PoolRef.name is required" + // +optional + IPv6PoolRef *corev1.TypedLocalObjectReference `json:"ipv6PoolRef,omitempty"` +} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml index c7bd65ab..a3191698 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml @@ -83,7 +83,7 @@ spec: ipv4PoolRef: description: |- IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. - The nic will use an available IP address from the referenced pool. + The NIC will use an available IP address from the referenced pool. properties: apiGroup: description: |- @@ -117,7 +117,7 @@ spec: ipv6PoolRef: description: |- IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. - The nic will use an available IP address from the referenced pool. + The NIC will use an available IP address from the referenced pool. properties: apiGroup: description: |- @@ -251,7 +251,7 @@ spec: ipv4PoolRef: description: |- IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. - The nic will use an available IP address from the referenced pool. + The NIC will use an available IP address from the referenced pool. properties: apiGroup: description: |- @@ -280,7 +280,7 @@ spec: ipv6PoolRef: description: |- IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. - The nic will use an available IP address from the referenced pool. + The NIC will use an available IP address from the referenced pool. properties: apiGroup: description: |- diff --git a/docs/ipam.md b/docs/ipam.md new file mode 100644 index 00000000..157e0a7a --- /dev/null +++ b/docs/ipam.md @@ -0,0 +1,47 @@ +# What is IPAM? +IPAM (IP Address Management) is a system used to manage IP address allocation and tracking within a network. In Kubernetes, IPAM is crucial for managing IP addresses across dynamic and often ephemeral workloads, ensuring each network interface within the cluster is assigned a unique and valid IP address. + +## Why Use IPAM? +- **Automation**: Simplifies network configuration by automating IP assignment. 
+- **Scalability**: Supports dynamic scaling of clusters by efficiently managing IP addresses. +- **Flexibility**: Works with various network topologies and can integrate with both cloud-based and on-premises IPAM solutions. +- **Security**: Reduces the risk of IP conflicts and unauthorized access by ensuring each node and pod has a unique IP address. + +## Prerequisites for Using IPAM in Kubernetes +To use IPAM, you need an IPAM provider. One such provider is the [Cluster API IPAM Provider In-Cluster](https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster). This provider allows Kubernetes to integrate IPAM functionalities directly into its cluster management workflow. + +## Setting Up an IPAM Provider +- **Install the IPAM Provider**: Deploy the IPAM provider in your Kubernetes cluster. This will typically involve deploying custom controllers and CRDs (Custom Resource Definitions) to manage IP pools. +- **Create IP Pools**: Define IP pools that will be used for assigning IPs to network interfaces. These can be specific to a cluster (InClusterIPPool) or shared across clusters (GlobalInClusterIPPool). + +## Using IPAM with IonosCloudMachine +### Example YAML Configuration +Let's explore how to integrate IPAM with your IonosCloudMachine resource in Kubernetes. +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudMachine +metadata: + name: example-machine +spec: + ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: primary-node-ips + additionalNetworks: + - networkID: 3 + ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: additional-node-ips +``` + ### Explanation of Configuration + +```yaml +ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: primary-node-ips +``` +- **apiGroup**: Specifies the API group for the IPAM configuration, ensuring the correct API resources are targeted. +- **kind**: The type of IP pool being referenced. In this case, it's InClusterIPPool, which is specific to the current cluster. +- **name**: The name of the IP pool (primary-node-ips) from which the primary NIC will obtain its IP address. 
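+
+### Defining the Referenced Pools
+The pools referenced above are provided by the IPAM provider and must exist before the machines are reconciled. As an illustration only, a minimal `InClusterIPPool` backing the `primary-node-ips` reference could look like the sketch below; the pool name, addresses, and prefix are placeholders and have to match your own network (a pool such as `additional-node-ips` would be defined the same way):
+```yaml
+apiVersion: ipam.cluster.x-k8s.io/v1alpha2
+kind: InClusterIPPool
+metadata:
+  name: primary-node-ips
+spec:
+  # Placeholder addresses from TEST-NET-3; replace with IPs that are routable in your LAN.
+  addresses:
+    - 203.0.113.10
+    - 203.0.113.11
+  prefix: 24
+```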
diff --git a/internal/controller/ionoscloudmachine_controller.go b/internal/controller/ionoscloudmachine_controller.go index a1e81ad9..885b6f87 100644 --- a/internal/controller/ionoscloudmachine_controller.go +++ b/internal/controller/ionoscloudmachine_controller.go @@ -36,7 +36,7 @@ import ( infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud" - "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/ipam" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/k8s" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/locker" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" ) @@ -181,10 +181,10 @@ func (r *IonosCloudMachineReconciler) reconcileNormal( return ctrl.Result{RequeueAfter: defaultReconcileDuration}, nil } - ipamHelper := ipam.NewHelper(r.Client, log) + k8sHelper := k8s.NewHelper(r.Client, log) reconcileSequence := []serviceReconcileStep[scope.Machine]{ {"ReconcileLAN", cloudService.ReconcileLAN}, - {"ReconcileIPAddressClaims", ipamHelper.ReconcileIPAddresses}, + {"ReconcileIPAddressClaims", k8sHelper.ReconcileIPAddresses}, {"ReconcileServer", cloudService.ReconcileServer}, {"ReconcileIPFailover", cloudService.ReconcileIPFailover}, {"FinalizeMachineProvisioning", cloudService.FinalizeMachineProvisioning}, @@ -223,7 +223,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( return ctrl.Result{RequeueAfter: defaultReconcileDuration}, nil } - ipamHelper := ipam.NewHelper(r.Client, log) + ipamHelper := k8s.NewHelper(r.Client, log) reconcileSequence := []serviceReconcileStep[scope.Machine]{ // NOTE(avorima): NICs, which are configured in an IP failover configuration, cannot be deleted // by a request to delete the server. Therefore, during deletion, we need to remove the NIC from @@ -232,7 +232,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( {"ReconcileServerDeletion", cloudService.ReconcileServerDeletion}, {"ReconcileLANDeletion", cloudService.ReconcileLANDeletion}, {"ReconcileFailoverIPBlockDeletion", cloudService.ReconcileFailoverIPBlockDeletion}, - {"ReconcileIPAddressClaimsDeletion", ipamHelper.ReconcileIPAddresses}, + {"ReconcileIPAddressClaimsDeletion", ipamHelper.ReconcileIPAddressClaimsDeletion}, } for _, step := range reconcileSequence { diff --git a/internal/service/ipam/ipam.go b/internal/service/k8s/ipam.go similarity index 88% rename from internal/service/ipam/ipam.go rename to internal/service/k8s/ipam.go index 9659a662..5ddea900 100644 --- a/internal/service/ipam/ipam.go +++ b/internal/service/k8s/ipam.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package ipam offers services for IPAM management. -package ipam +// Package k8s offers services to interact with kubernetes. +package k8s import ( "context" @@ -36,17 +36,17 @@ import ( ) const ( - // PrimaryNICFormat is the format used for IPAddressClaims for the primary nic. - PrimaryNICFormat = "nic-%s" + // primaryNICFormat is the format used for IPAddressClaims for the primary NIC. + primaryNICFormat = "nic-%s" - // AdditionalNICFormat is the format used for IPAddressClaims for additional nics. - AdditionalNICFormat = "nic-%s-%d" + // additionalNICFormat is the format used for IPAddressClaims for additional nics. + additionalNICFormat = "nic-%s-%d" - // IPV4Format is the IP v4 format. - IPV4Format = "ipv4" + // ipV4Format is the IP v4 format. 
+ ipV4Format = "ipv4" - // IPV6Format is the IP v6 format. - IPV6Format = "ipv6" + // ipV6Format is the IP v6 format. + ipV6Format = "ipv6" ) // Helper offers IP address management services for IONOS Cloud machine reconciliation. @@ -95,15 +95,16 @@ func (h *Helper) ReconcileIPAddresses(ctx context.Context, machineScope *scope.M return requeue, nil } -func (h *Helper) ReconcileIPAddressClaimsDeletion(ctx context.Context, machineScope *scope.Machine) (err error) { +// ReconcileIPAddressClaimsDeletion removes the MachineFinalizer from the IPAddressClaims. +func (h *Helper) ReconcileIPAddressClaimsDeletion(ctx context.Context, machineScope *scope.Machine) (requeue bool, err error) { log := h.logger.WithName("reconcileIPAddressClaimsDeletion") log.V(4).Info("removing finalizers from IPAddressClaims.") - formats := []string{IPV4Format, IPV6Format} - nicNames := []string{fmt.Sprintf(PrimaryNICFormat, machineScope.IonosMachine.Name)} + formats := []string{ipV4Format, ipV6Format} + nicNames := []string{fmt.Sprintf(primaryNICFormat, machineScope.IonosMachine.Name)} for _, network := range machineScope.IonosMachine.Spec.AdditionalNetworks { - nicName := fmt.Sprintf(AdditionalNICFormat, machineScope.IonosMachine.Name, network.NetworkID) + nicName := fmt.Sprintf(additionalNICFormat, machineScope.IonosMachine.Name, network.NetworkID) nicNames = append(nicNames, nicName) } @@ -119,28 +120,28 @@ func (h *Helper) ReconcileIPAddressClaimsDeletion(ctx context.Context, machineSc if apierrors.IsNotFound(err) { continue } - return err + return true, err } if updated := controllerutil.RemoveFinalizer(claim, infrav1.MachineFinalizer); updated { if err = h.client.Update(ctx, claim); err != nil { - return err + return true, err } } } } - return nil + return false, nil } func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) { nic := infrav1.NICInfo{Primary: true} ipamConfig := machineScope.IonosMachine.Spec.IPAMConfig - nicName := fmt.Sprintf(PrimaryNICFormat, machineScope.IonosMachine.Name) + nicName := fmt.Sprintf(primaryNICFormat, machineScope.IonosMachine.Name) - // default NIC ipv4. + // default NIC IPv4. if ipamConfig.IPv4PoolRef != nil { - ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV4Format, ipamConfig.IPv4PoolRef) + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, ipV4Format, ipamConfig.IPv4PoolRef) if err != nil { return false, err } @@ -151,9 +152,9 @@ func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machi } } - // default NIC ipv6. + // default NIC IPv6. 
if ipamConfig.IPv6PoolRef != nil { - ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV6Format, ipamConfig.IPv6PoolRef) + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, ipV6Format, ipamConfig.IPv6PoolRef) if err != nil { return false, err } @@ -172,9 +173,9 @@ func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machi func (h *Helper) handleAdditionalNICs(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) { for _, net := range machineScope.IonosMachine.Spec.AdditionalNetworks { nic := infrav1.NICInfo{Primary: false} - nicName := fmt.Sprintf(AdditionalNICFormat, machineScope.IonosMachine.Name, net.NetworkID) + nicName := fmt.Sprintf(additionalNICFormat, machineScope.IonosMachine.Name, net.NetworkID) if net.IPv4PoolRef != nil { - ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV4Format, net.IPv4PoolRef) + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, ipV4Format, net.IPv4PoolRef) if err != nil { return false, errors.Join(err, fmt.Errorf("unable to handle IPv4Address for nic %s", nicName)) } @@ -186,7 +187,7 @@ func (h *Helper) handleAdditionalNICs(ctx context.Context, machineScope *scope.M } if net.IPv6PoolRef != nil { - ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV6Format, net.IPv6PoolRef) + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, ipV6Format, net.IPv6PoolRef) if err != nil { return false, errors.Join(err, fmt.Errorf("unable to handle IPv6Address for nic %s", nicName)) } diff --git a/internal/service/ipam/ipam_test.go b/internal/service/k8s/ipam_test.go similarity index 99% rename from internal/service/ipam/ipam_test.go rename to internal/service/k8s/ipam_test.go index 9464ea9f..68e0506f 100644 --- a/internal/service/ipam/ipam_test.go +++ b/internal/service/k8s/ipam_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package ipam +package k8s import ( "context" From d891113b2827bfc976a9045d234425662c3f9b40 Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Fri, 9 Aug 2024 13:34:10 +0200 Subject: [PATCH 05/13] Add clustername label to ipaddressclaims to make clusterctl move work --- internal/service/k8s/ipam.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/service/k8s/ipam.go b/internal/service/k8s/ipam.go index 5ddea900..e0b6207f 100644 --- a/internal/service/k8s/ipam.go +++ b/internal/service/k8s/ipam.go @@ -27,6 +27,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -219,7 +220,7 @@ func (h *Helper) handleIPAddressForNIC(ctx context.Context, machineScope *scope. return "", err } log.V(4).Info("IPAddressClaim not found, creating it.", "nic", nic) - err = h.CreateIPAddressClaim(ctx, machineScope.IonosMachine, key.Name, poolRef) + err = h.CreateIPAddressClaim(ctx, machineScope.IonosMachine, key.Name, machineScope.ClusterScope.Cluster.Name, poolRef) if err != nil { return "", errors.Join(err, fmt.Errorf("unable to create IPAddressClaim for machine %s", machineScope.IonosMachine.Name)) } @@ -251,12 +252,11 @@ func (h *Helper) handleIPAddressForNIC(ctx context.Context, machineScope *scope. 
} // CreateIPAddressClaim creates an IPAddressClaim for a given object. -func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, name string, poolRef *corev1.TypedLocalObjectReference) error { +func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, name string, cluster string, poolRef *corev1.TypedLocalObjectReference) error { claimRef := types.NamespacedName{ Namespace: owner.GetNamespace(), Name: name, } - ipAddrClaim := &ipamv1.IPAddressClaim{} var err error if err = h.client.Get(ctx, claimRef, ipAddrClaim); err != nil && !apierrors.IsNotFound(err) { @@ -272,6 +272,7 @@ func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, ObjectMeta: metav1.ObjectMeta{ Name: claimRef.Name, Namespace: claimRef.Namespace, + Labels: map[string]string{clusterv1.ClusterNameLabel: cluster}, }, Spec: ipamv1.IPAddressClaimSpec{ PoolRef: *poolRef, From 82098f9a3461715672fe1d7fe1c8037460bb0c6f Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Tue, 27 Aug 2024 10:18:42 +0200 Subject: [PATCH 06/13] Add e2e test --- test/e2e/capic_test.go | 25 + test/e2e/config/ionoscloud.yaml | 14 + .../cluster-template-ipam.yaml | 465 ++++++++++++++++++ .../data/shared/capi-ipam/v0.1/metadata.yaml | 12 + test/e2e/env_test.go | 10 +- test/e2e/suite_test.go | 3 + 6 files changed, 527 insertions(+), 2 deletions(-) create mode 100644 test/e2e/data/infrastructure-ionoscloud/cluster-template-ipam.yaml create mode 100644 test/e2e/data/shared/capi-ipam/v0.1/metadata.yaml diff --git a/test/e2e/capic_test.go b/test/e2e/capic_test.go index 1295cc11..6e84b759 100644 --- a/test/e2e/capic_test.go +++ b/test/e2e/capic_test.go @@ -22,6 +22,8 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "os" + clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" capie2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" @@ -95,3 +97,26 @@ var _ = Describe("Should be able to create a cluster with 1 control-plane and 1 } }) }) + +var _ = Describe("Should be able to create a cluster with 1 control-plane using an IP from the IPAddressPool", func() { + capie2e.QuickStartSpec(ctx, func() capie2e.QuickStartSpecInput { + return capie2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ControlPlaneMachineCount: ptr.To(int64(1)), + WorkerMachineCount: ptr.To(int64(0)), + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + Flavor: ptr.To("ipam"), + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + PostNamespaceCreated: cloudEnv.createCredentialsSecretPNC, + PostMachinesProvisioned: func(managementClusterProxy framework.ClusterProxy, namespace, clusterName string) { + machines := &infrav1.IonosCloudMachineList{} + Expect(managementClusterProxy.GetClient().List(ctx, machines, runtimeclient.InNamespace(namespace))).NotTo(HaveOccurred()) + nic := machines.Items[0].Status.MachineNetworkInfo.NICInfo[0] + desired := os.Getenv("ADDITIONAL_IPS") + Expect(nic.IPv4Addresses).To(ContainElement(desired)) + }, + } + }) +}) diff --git a/test/e2e/config/ionoscloud.yaml b/test/e2e/config/ionoscloud.yaml index bbd49a2d..49208d8a 100644 --- a/test/e2e/config/ionoscloud.yaml +++ b/test/e2e/config/ionoscloud.yaml @@ -38,6 +38,19 @@ providers: new: --metrics-addr=:8443 files: - sourcePath: "../data/shared/v1.7/metadata.yaml" + - name: in-cluster + type: IPAMProvider + versions: + - name: "{go://sigs.k8s.io/cluster-api-ipam-provider-in-cluster@v0.1}" # supported release in the v1alpha2 
series + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/releases/download/{go://sigs.k8s.io/cluster-api-ipam-provider-in-cluster@v0.1}/ipam-components.yaml" + type: url + contract: v1beta1 + files: + - sourcePath: "../data/shared/capi-ipam/v0.1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: ionoscloud type: InfrastructureProvider versions: @@ -51,6 +64,7 @@ providers: files: - sourcePath: "../../../metadata.yaml" - sourcePath: "../data/infrastructure-ionoscloud/cluster-template.yaml" + - sourcePath: "../data/infrastructure-ionoscloud/cluster-template-ipam.yaml" variables: # Default variables for the e2e test; those values could be overridden via env variables, thus # allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation. diff --git a/test/e2e/data/infrastructure-ionoscloud/cluster-template-ipam.yaml b/test/e2e/data/infrastructure-ionoscloud/cluster-template-ipam.yaml new file mode 100644 index 00000000..7d1f28cf --- /dev/null +++ b/test/e2e/data/infrastructure-ionoscloud/cluster-template-ipam.yaml @@ -0,0 +1,465 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: IonosCloudCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_HOST:-${CONTROL_PLANE_ENDPOINT_IP}} + port: ${CONTROL_PLANE_ENDPOINT_PORT:-6443} + location: ${CONTROL_PLANE_ENDPOINT_LOCATION} + credentialsRef: + name: "ionoscloud-credentials" +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: IonosCloudMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + users: + - name: root + sshAuthorizedKeys: [${IONOSCLOUD_MACHINE_SSH_KEYS}] + ntp: + enabled: true + servers: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + files: + - path: /etc/ssh/sshd_config.d/ssh-audit_hardening.conf + owner: root:root + permissions: '0644' + content: | + # Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com + # hardening guide. 
+ KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256 + Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr + MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com + HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- + HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + - path: /etc/sysctl.d/k8s.conf + content: | + fs.inotify.max_user_watches = 65536 + net.netfilter.nf_conntrack_max = 1000000 + - path: /etc/modules-load.d/k8s.conf + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + ip_vs_sed + # Crictl config + - path: /etc/crictl.yaml + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + timeout: 10 + - path: /etc/kubernetes/manifests/kube-vip.yaml + owner: root:root + content: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "${CONTROL_PLANE_ENDPOINT_PORT:-6443}" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.7.1 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + - localhost + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + - path: /etc/kube-vip-prepare.sh + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + permissions: "0700" + + # CSI Metadata config + - content: | + { + "datacenter-id": "${IONOSCLOUD_DATACENTER_ID}" + } + owner: root:root + path: /etc/ie-csi/cfg.json + permissions: '0644' + + - content: | + #!/bin/bash + set -e + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + NODE_IPv4_ADDRESS=$(ip -j addr show dev ens6 | jq -r '.[].addr_info[] | select(.family == "inet") | select(.scope=="global") | select(.dynamic) | .local') + if [[ $NODE_IPv4_ADDRESS ]]; then + sed -i '$ s/$/ --node-ip '"$NODE_IPv4_ADDRESS"'/' /etc/default/kubelet + fi + # IPv6 currently not set, the ip is not set then this runs. Needs to be waited for. + NODE_IPv6_ADDRESS=$(ip -j addr show dev ens6 | jq -r '.[].addr_info[] | select(.family == "inet6") | select(.scope=="global") | .local') + if [[ $NODE_IPv6_ADDRESS ]]; then + sed -i '$ s/$/ --node-ip '"$NODE_IPv6_ADDRESS"'/' /etc/default/kubelet + fi + owner: root:root + path: /etc/set-node-ip.sh + permissions: '0700' + + preKubeadmCommands: + - systemctl restart systemd-networkd.service systemd-modules-load.service systemd-journald containerd + # disable swap + - swapoff -a + - sed -i '/ swap / s/^/#/' /etc/fstab + - sysctl --system + - /etc/kube-vip-prepare.sh + # workaround 1.29 IP issue + - /etc/set-node-ip.sh + postKubeadmCommands: + - > + sed -i 's#path: /etc/kubernetes/super-admin.conf#path: /etc/kubernetes/admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + - > + systemctl disable --now udisks2 multipathd motd-news.timer fwupd-refresh.timer + packagekit ModemManager snapd snapd.socket snapd.apparmor snapd.seeded + # INFO(schegi-ionos): We decided to not remove this for now, since removing this would require the ccm to be installed for cluster-api + # to continue after the first node. 
+ - export system_uuid=$(kubectl --kubeconfig /etc/kubernetes/kubelet.conf get node $(hostname) -ojsonpath='{..systemUUID }') + - > + kubectl --kubeconfig /etc/kubernetes/kubelet.conf + patch node $(hostname) + --type strategic -p '{"spec": {"providerID": "ionos://'$${system_uuid}'"}}' + - rm /etc/ssh/ssh_host_* + - ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N "" + - ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N "" + - sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config + - awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe + - mv /etc/ssh/moduli.safe /etc/ssh/moduli + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - apt-get update + - DEBIAN_FRONTEND=noninteractive apt-get install -q -y netfilter-persistent iptables-persistent + - service netfilter-persistent save + - systemctl restart sshd + initConfiguration: + localAPIEndpoint: + bindPort: ${CONTROL_PLANE_ENDPOINT_PORT:-6443} + nodeRegistration: + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + joinConfiguration: + nodeRegistration: + criSocket: unix:///run/containerd/containerd.sock + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + version: "${KUBERNETES_VERSION}" +--- +kind: IonosCloudMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: ${CLUSTER_NAME} + datacenterID: ${IONOSCLOUD_DATACENTER_ID} + numCores: ${IONOSCLOUD_MACHINE_NUM_CORES:-4} + memoryMB: ${IONOSCLOUD_MACHINE_MEMORY_MB:-8192} + disk: + image: + id: ${IONOSCLOUD_MACHINE_IMAGE_ID} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-workers" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + node-role.kubernetes.io/node: "" + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: IonosCloudMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudMachineTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: ${CLUSTER_NAME} + datacenterID: ${IONOSCLOUD_DATACENTER_ID} + numCores: ${IONOSCLOUD_MACHINE_NUM_CORES:-2} + memoryMB: ${IONOSCLOUD_MACHINE_MEMORY_MB:-4096} + disk: + image: + id: ${IONOSCLOUD_MACHINE_IMAGE_ID} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: 
[${IONOSCLOUD_MACHINE_SSH_KEYS}] + ntp: + enabled: true + servers: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + files: + - path: /etc/ssh/sshd_config.d/ssh-audit_hardening.conf + owner: root:root + permissions: '0644' + content: | + # Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com + # hardening guide. + KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256 + Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr + MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com + HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- + HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + - path: /etc/sysctl.d/k8s.conf + content: | + fs.inotify.max_user_watches = 65536 + net.netfilter.nf_conntrack_max = 1000000 + - path: /etc/modules-load.d/k8s.conf + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + ip_vs_sed + # Crictl config + - path: /etc/crictl.yaml + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + timeout: 10 + # CSI Metadata config + - content: | + { + "datacenter-id": "${IONOSCLOUD_DATACENTER_ID}" + } + owner: root:root + path: /etc/ie-csi/cfg.json + permissions: '0644' + preKubeadmCommands: + - systemctl restart systemd-networkd.service systemd-modules-load.service systemd-journald containerd + # disable swap + - swapoff -a + - sed -i '/ swap / s/^/#/' /etc/fstab + - sysctl --system + postKubeadmCommands: + - > + systemctl disable --now udisks2 multipathd motd-news.timer fwupd-refresh.timer + packagekit ModemManager snapd snapd.socket snapd.apparmor snapd.seeded + # INFO(schegi-ionos): We decided to not remove this for now, since removing this would require the ccm to be + # installed for cluster-api to continue after the first node. 
+ - export system_uuid=$(kubectl --kubeconfig /etc/kubernetes/kubelet.conf get node $(hostname) -ojsonpath='{..systemUUID }') + - > + kubectl --kubeconfig /etc/kubernetes/kubelet.conf + patch node $(hostname) + --type strategic -p '{"spec": {"providerID": "ionos://'$${system_uuid}'"}}' + - rm /etc/ssh/ssh_host_* + - ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N "" + - ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N "" + - sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config + - awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe + - mv /etc/ssh/moduli.safe /etc/ssh/moduli + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - apt-get update + - DEBIAN_FRONTEND=noninteractive apt-get install -q -y netfilter-persistent iptables-persistent + - service netfilter-persistent save + - systemctl restart sshd + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + criSocket: unix:///run/containerd/containerd.sock +--- +# ConfigMap object referenced by the ClusterResourceSet object and with +# the CNI resource defined in the test config file +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-0" +data: ${CNI_RESOURCES} +--- +# ClusterResourceSet object with +# a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0 +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: "${CLUSTER_NAME}-crs-0" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-0" + resources: + - name: "cni-${CLUSTER_NAME}-crs-0" + kind: ConfigMap +--- +apiVersion: ipam.cluster.x-k8s.io/v1alpha2 +kind: InClusterIPPool +metadata: + name: ${CLUSTER_NAME} +spec: + prefix: ${IPAM_PREFIX:-24} + addresses: + - ${ADDITIONAL_IPS} diff --git a/test/e2e/data/shared/capi-ipam/v0.1/metadata.yaml b/test/e2e/data/shared/capi-ipam/v0.1/metadata.yaml new file mode 100644 index 00000000..56d61ac6 --- /dev/null +++ b/test/e2e/data/shared/capi-ipam/v0.1/metadata.yaml @@ -0,0 +1,12 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. 
+# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 0 + minor: 1 + contract: v1beta1 + diff --git a/test/e2e/env_test.go b/test/e2e/env_test.go index 3a5844c1..f6e4f105 100644 --- a/test/e2e/env_test.go +++ b/test/e2e/env_test.go @@ -23,6 +23,7 @@ import ( "context" "fmt" "os" + "strings" "github.com/google/uuid" corev1 "k8s.io/api/core/v1" @@ -64,7 +65,7 @@ func (e *ionosCloudEnv) setup() { dcRequest := e.createDatacenter(ctx, location) By("Requesting an IP block") - ipbRequest := e.reserveIPBlock(ctx, location, 1) + ipbRequest := e.reserveIPBlock(ctx, location, 2) By("Waiting for requests to complete") e.waitForCreationRequests(ctx, dcRequest, ipbRequest) @@ -134,7 +135,12 @@ func (e *ionosCloudEnv) reserveIPBlock(ctx context.Context, location string, siz if os.Getenv("CI") == "true" { e.writeToGithubOutput("IP_BLOCK_ID", *e.ipBlock.Id) } - Expect(os.Setenv("CONTROL_PLANE_ENDPOINT_IP", (*e.ipBlock.Properties.Ips)[0])).ToNot(HaveOccurred(), "Failed setting datacenter ID in environment variable") + + ips := (*e.ipBlock.Properties.Ips) + Expect(os.Setenv("CONTROL_PLANE_ENDPOINT_IP", ips[0])).ToNot(HaveOccurred(), "Failed setting control plane endpoint ID in environment variable") + if len(ips) > 1 { + Expect(os.Setenv("ADDITIONAL_IPS", strings.Join(ips[1:], ","))).ToNot(HaveOccurred(), "Failed setting additional IPs in environment variable") + } return res.Header.Get(apiLocationHeaderKey) } diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index a205b516..44ebc644 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -34,6 +34,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -193,6 +194,7 @@ var _ = SynchronizedAfterSuite(func() { func initScheme() *runtime.Scheme { s := runtime.NewScheme() framework.TryAddDefaultSchemes(s) + Expect(ipamv1.AddToScheme(s)).To(Succeed()) Expect(infrav1.AddToScheme(s)).To(Succeed()) return s } @@ -248,6 +250,7 @@ func initBootstrapCluster() { clusterctl.InitManagementClusterAndWatchControllerLogs(watchesCtx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{ ClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: clusterctlConfigPath, + IPAMProviders: e2eConfig.IPAMProviders(), InfrastructureProviders: e2eConfig.InfrastructureProviders(), LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), }, e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) 
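Note (not part of the patch series): patch 06 above reserves the extra addresses from ${ADDITIONAL_IPS} in an InClusterIPPool named after the cluster, and the CRD changes earlier in the series add an ipv4PoolRef field under additionalNetworks whose apiGroup/kind are validated against ipam.cluster.x-k8s.io and InClusterIPPool/GlobalInClusterIPPool. A minimal sketch of how such a pool might be referenced from a machine template follows; the spec.template.spec nesting, the template name, and the omission of the remaining required machine fields are assumptions for brevity, not taken from this patch series.

apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: IonosCloudMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-workers   # illustrative name
spec:
  template:
    spec:
      # ... remaining required machine fields omitted for brevity ...
      additionalNetworks:
        - networkID: 1                      # ID of an existing LAN in the data center
          ipv4PoolRef:                      # obtain the IPv4 address from the pool instead of DHCP
            apiGroup: ipam.cluster.x-k8s.io
            kind: InClusterIPPool
            name: ${CLUSTER_NAME}           # the pool created from ${ADDITIONAL_IPS} above
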
From 1e46ddb5a6569765cea2da25af5b64816c47fd50 Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Tue, 27 Aug 2024 13:47:16 +0200 Subject: [PATCH 07/13] Fix typo --- test/e2e/env_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/env_test.go b/test/e2e/env_test.go index f6e4f105..8de30bf9 100644 --- a/test/e2e/env_test.go +++ b/test/e2e/env_test.go @@ -88,7 +88,7 @@ func (e *ionosCloudEnv) teardown() { func (e *ionosCloudEnv) createDatacenter(ctx context.Context, location string) (requestLocation string) { name := fmt.Sprintf("capic-e2e-test-%s", uuid.New().String()) - description := "used in a CACIC E2E test run" + description := "used in a CAPIC E2E test run" if os.Getenv("CI") == "true" { name = fmt.Sprintf("capic-e2e-test-%s", os.Getenv("GITHUB_RUN_ID")) description = fmt.Sprintf("CI run: %s", e.githubCIRunURL()) From d17bae17adc0dcdc4fef94c6e5ce9cad2e1f168c Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Wed, 28 Aug 2024 10:48:06 +0200 Subject: [PATCH 08/13] Fix crds --- api/v1alpha1/zz_generated.deepcopy.go | 2 +- ...tructure.cluster.x-k8s.io_ionoscloudmachines.yaml | 12 ++++++------ ....cluster.x-k8s.io_ionoscloudmachinetemplates.yaml | 12 ++++++------ internal/service/cloud/server.go | 8 ++++---- test/e2e/capic_test.go | 2 +- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 03121d12..0ad7b4e3 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -589,7 +589,6 @@ func (in *NICInfo) DeepCopy() *NICInfo { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Network) DeepCopyInto(out *Network) { *out = *in - in.IPAMConfig.DeepCopyInto(&out.IPAMConfig) if in.VNET != nil { in, out := &in.VNET, &out.VNET *out = new(string) @@ -600,6 +599,7 @@ func (in *Network) DeepCopyInto(out *Network) { *out = new(bool) **out = **in } + in.IPAMConfig.DeepCopyInto(&out.IPAMConfig) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml index c5b7a3af..9ecd4eea 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml @@ -69,6 +69,12 @@ spec: items: description: Network contains the config for additional LANs. properties: + dhcp: + default: true + description: |- + DHCP indicates whether DHCP is enabled for the LAN. + The primary NIC will always have DHCP enabled. + type: boolean ipv4PoolRef: description: |- IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. @@ -127,12 +133,6 @@ spec: rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' - message: ipv6PoolRef.name is required rule: self.name != '' - dhcp: - default: true - description: |- - DHCP indicates whether DHCP is enabled for the LAN. - The primary NIC will always have DHCP enabled. - type: boolean networkID: description: |- NetworkID represents an ID an existing LAN in the data center. 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml index f28a1844..b672c2ef 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml @@ -79,6 +79,12 @@ spec: description: Network contains the config for additional LANs. properties: + dhcp: + default: true + description: |- + DHCP indicates whether DHCP is enabled for the LAN. + The primary NIC will always have DHCP enabled. + type: boolean ipv4PoolRef: description: |- IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. @@ -147,12 +153,6 @@ spec: == 'GlobalInClusterIPPool' - message: ipv6PoolRef.name is required rule: self.name != '' - dhcp: - default: true - description: |- - DHCP indicates whether DHCP is enabled for the LAN. - The primary NIC will always have DHCP enabled. - type: boolean networkID: description: |- NetworkID represents an ID an existing LAN in the data center. diff --git a/internal/service/cloud/server.go b/internal/service/cloud/server.go index 1856c5ab..b24f0c53 100644 --- a/internal/service/cloud/server.go +++ b/internal/service/cloud/server.go @@ -442,10 +442,10 @@ func (s *Service) buildServerEntities(ms *scope.Machine, params serverEntityPara for i, nw := range ms.IonosMachine.Spec.AdditionalNetworks { nic := sdk.Nic{ Properties: &sdk.NicProperties{ - Lan: &nw.NetworkID, - Vnet: nw.VNET, - Dhcp: nw.DHCP, - }, + Lan: &nw.NetworkID, + Vnet: nw.VNET, + Dhcp: nw.DHCP, + }, } if ms.IonosMachine.Status.MachineNetworkInfo != nil { diff --git a/test/e2e/capic_test.go b/test/e2e/capic_test.go index 01a0718c..dea8ccac 100644 --- a/test/e2e/capic_test.go +++ b/test/e2e/capic_test.go @@ -111,7 +111,7 @@ var _ = Describe("Should be able to create a cluster with 1 control-plane using ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, PostNamespaceCreated: cloudEnv.createCredentialsSecretPNC, - PostMachinesProvisioned: func(managementClusterProxy framework.ClusterProxy, namespace, clusterName string) { + PostMachinesProvisioned: func(managementClusterProxy framework.ClusterProxy, namespace, _ string) { machines := &infrav1.IonosCloudMachineList{} Expect(managementClusterProxy.GetClient().List(ctx, machines, runtimeclient.InNamespace(namespace))).NotTo(HaveOccurred()) nic := machines.Items[0].Status.MachineNetworkInfo.NICInfo[0] From 76bd54b5114a3f7908c488cff1e28b88646f5f7c Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Wed, 28 Aug 2024 12:19:00 +0200 Subject: [PATCH 09/13] Fix linter issues --- api/v1alpha1/ionoscloudmachine_types.go | 10 ++++----- api/v1alpha1/ionoscloudmachine_types_test.go | 2 +- api/v1alpha1/zz_generated.deepcopy.go | 23 +------------------- 3 files changed, 6 insertions(+), 29 deletions(-) diff --git a/api/v1alpha1/ionoscloudmachine_types.go b/api/v1alpha1/ionoscloudmachine_types.go index 3494079e..c07098aa 100644 --- a/api/v1alpha1/ionoscloudmachine_types.go +++ b/api/v1alpha1/ionoscloudmachine_types.go @@ -151,8 +151,11 @@ type IonosCloudMachineSpec struct { Disk *Volume `json:"disk"` // AdditionalNetworks defines the additional network configurations for the VM. 
+ // + // +listType=map + // +listMapKey=networkID //+optional - AdditionalNetworks Networks `json:"additionalNetworks,omitempty"` + AdditionalNetworks []Network `json:"additionalNetworks,omitempty"` // IPAMConfig allows to obtain IP Addresses from existing IP pools instead of using DHCP. IPAMConfig `json:",inline"` @@ -175,11 +178,6 @@ type IonosCloudMachineSpec struct { Type ServerType `json:"type,omitempty"` } -// Networks contains a list of additional LAN IDs that should be attached to the VM. -// +listType=map -// +listMapKey=networkID -type Networks []Network - // Network contains the config for additional LANs. type Network struct { // NetworkID represents an ID an existing LAN in the data center. diff --git a/api/v1alpha1/ionoscloudmachine_types_test.go b/api/v1alpha1/ionoscloudmachine_types_test.go index ae986c28..8ce11396 100644 --- a/api/v1alpha1/ionoscloudmachine_types_test.go +++ b/api/v1alpha1/ionoscloudmachine_types_test.go @@ -55,7 +55,7 @@ func defaultMachine() *IonosCloudMachine { ID: "1eef-48ec-a246-a51a33aa4f3a", }, }, - AdditionalNetworks: Networks{ + AdditionalNetworks: []Network{ { NetworkID: 1, }, diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0ad7b4e3..736fcfaf 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -383,7 +383,7 @@ func (in *IonosCloudMachineSpec) DeepCopyInto(out *IonosCloudMachineSpec) { } if in.AdditionalNetworks != nil { in, out := &in.AdditionalNetworks, &out.AdditionalNetworks - *out = make(Networks, len(*in)) + *out = make([]Network, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -612,27 +612,6 @@ func (in *Network) DeepCopy() *Network { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Networks) DeepCopyInto(out *Networks) { - { - in := &in - *out = make(Networks, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networks. -func (in Networks) DeepCopy() Networks { - if in == nil { - return nil - } - out := new(Networks) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProvisioningRequest) DeepCopyInto(out *ProvisioningRequest) { *out = *in From a62b645c8ac799091bdec7502b6dd4a4a8f21105 Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Wed, 28 Aug 2024 12:57:34 +0200 Subject: [PATCH 10/13] Fix linter issues --- internal/service/cloud/server.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/internal/service/cloud/server.go b/internal/service/cloud/server.go index b24f0c53..7bbda6c5 100644 --- a/internal/service/cloud/server.go +++ b/internal/service/cloud/server.go @@ -416,6 +416,7 @@ func (s *Service) buildServerEntities(ms *scope.Machine, params serverEntityPara Properties: &sdk.NicProperties{ Lan: ¶ms.lanID, Name: ptr.To(s.nicName(ms.IonosMachine)), + Dhcp: ptr.To(true), }, } @@ -425,8 +426,6 @@ func (s *Service) buildServerEntities(ms *scope.Machine, params serverEntityPara primaryNIC.Properties.Ipv6Ips = ptr.To(nicInfo.IPv6Addresses) } - primaryNIC.Properties.Dhcp = ptr.To(true) - // In case we want to retrieve a public IP from the DHCP, we need to // create a NIC with empty IP addresses and patch the NIC afterward. 
// To simplify the code we also follow this approach when using IP pools. @@ -454,8 +453,6 @@ func (s *Service) buildServerEntities(ms *scope.Machine, params serverEntityPara nic.Properties.Ipv6Ips = ptr.To(nicInfo.IPv6Addresses) } - nic.Properties.Dhcp = ptr.To(true) - items = append(items, nic) } From 333600c1fe3c571e95bba47ea8933ccba2714043 Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Wed, 28 Aug 2024 20:36:12 +0200 Subject: [PATCH 11/13] review rework --- test/e2e/env_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/env_test.go b/test/e2e/env_test.go index 0224feb8..fd2fea5c 100644 --- a/test/e2e/env_test.go +++ b/test/e2e/env_test.go @@ -144,7 +144,7 @@ func (e *ionosCloudEnv) reserveIPBlock(ctx context.Context, location string, siz } ips := (*e.ipBlock.Properties.Ips) - Expect(os.Setenv("CONTROL_PLANE_ENDPOINT_IP", ips[0])).ToNot(HaveOccurred(), "Failed setting control plane endpoint ID in environment variable") + Expect(os.Setenv("CONTROL_PLANE_ENDPOINT_IP", ips[0])).ToNot(HaveOccurred(), "Failed setting control plane endpoint IP in environment variable") if len(ips) > 1 { Expect(os.Setenv("ADDITIONAL_IPS", strings.Join(ips[1:], ","))).ToNot(HaveOccurred(), "Failed setting additional IPs in environment variable") } From ead48b99ccf8484040157bfe97b4bba17e0c46e7 Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Thu, 29 Aug 2024 13:49:02 +0200 Subject: [PATCH 12/13] Remove listtype+listmapkey annotation from AdditionalNetworks --- api/v1alpha1/ionoscloudmachine_types.go | 2 -- .../infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml | 3 --- ...astructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml | 3 --- 3 files changed, 8 deletions(-) diff --git a/api/v1alpha1/ionoscloudmachine_types.go b/api/v1alpha1/ionoscloudmachine_types.go index c07098aa..0a4abae3 100644 --- a/api/v1alpha1/ionoscloudmachine_types.go +++ b/api/v1alpha1/ionoscloudmachine_types.go @@ -152,8 +152,6 @@ type IonosCloudMachineSpec struct { // AdditionalNetworks defines the additional network configurations for the VM. 
// - // +listType=map - // +listMapKey=networkID //+optional AdditionalNetworks []Network `json:"additionalNetworks,omitempty"` diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml index 9ecd4eea..2072c4d2 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml @@ -148,9 +148,6 @@ spec: - networkID type: object type: array - x-kubernetes-list-map-keys: - - networkID - x-kubernetes-list-type: map availabilityZone: default: AUTO description: AvailabilityZone is the availability zone in which the diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml index b672c2ef..2565b2a7 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml @@ -168,9 +168,6 @@ spec: - networkID type: object type: array - x-kubernetes-list-map-keys: - - networkID - x-kubernetes-list-type: map availabilityZone: default: AUTO description: AvailabilityZone is the availability zone in From 6a2b3875f3c75e5357c28316dfcd5873c829f90e Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Fri, 30 Aug 2024 10:01:07 +0200 Subject: [PATCH 13/13] e2e tests: Wait for datacenter deletion request to complete before deleting ip blocks --- test/e2e/env_test.go | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/test/e2e/env_test.go b/test/e2e/env_test.go index fd2fea5c..69a86df6 100644 --- a/test/e2e/env_test.go +++ b/test/e2e/env_test.go @@ -85,11 +85,14 @@ func (e *ionosCloudEnv) teardown() { By("Requesting the deletion of the data center") datacenterRequest := e.deleteDatacenter(ctx) + By("Waiting for the deletion request to complete") + e.waitForDataCenterDeletion(ctx, datacenterRequest) + By("Requesting the deletion of the IP Block") ipBlockRequest := e.deleteIPBlock(ctx) - By("Waiting for deletion requests to complete") - e.waitForDeletionRequests(ctx, datacenterRequest, ipBlockRequest) + By("Waiting for deletion request to complete") + e.waitForIPBlockDeletion(ctx, ipBlockRequest) } } @@ -170,17 +173,22 @@ func (e *ionosCloudEnv) waitForCreationRequests(ctx context.Context, datacenterR Expect(err).ToNot(HaveOccurred(), "failed waiting for IP block reservation") } -func (e *ionosCloudEnv) waitForDeletionRequests(ctx context.Context, datacenterRequest, ipBlockRequest string) { - GinkgoLogr.Info("Waiting for data center and IP block deletion requests to complete", - "datacenterRequest", datacenterRequest, - "datacenterID", e.datacenterID, +func (e *ionosCloudEnv) waitForIPBlockDeletion(ctx context.Context, ipBlockRequest string) { + GinkgoLogr.Info("Waiting for IP block deletion requests to complete", "ipBlockRequest", ipBlockRequest, "ipBlockID", *e.ipBlock.Id) + _, err := e.api.WaitForRequest(ctx, ipBlockRequest) + Expect(err).ToNot(HaveOccurred(), "failed waiting for IP block deletion") +} + +func (e *ionosCloudEnv) waitForDataCenterDeletion(ctx context.Context, datacenterRequest string) { + GinkgoLogr.Info("Waiting for data center deletion requests to complete", + "datacenterRequest", datacenterRequest, + "datacenterID", e.datacenterID) + _, err := e.api.WaitForRequest(ctx, datacenterRequest) Expect(err).ToNot(HaveOccurred(), "failed 
waiting for data center deletion") - _, err = e.api.WaitForRequest(ctx, ipBlockRequest) - Expect(err).ToNot(HaveOccurred(), "failed waiting for IP block deletion") } // createCredentialsSecretPNC creates a secret with the IONOS Cloud credentials. This secret should be used as the