From a94918026c1ae511eae5a97492a94b8efa9ea63c Mon Sep 17 00:00:00 2001
From: Xun Jiang
Date: Fri, 20 Oct 2023 13:25:55 +0800
Subject: [PATCH] Add HealthCheckNodePort deletion logic in Service restore.

Signed-off-by: Xun Jiang
---
 changelogs/unreleased/7026-blackpiglet      |   1 +
 pkg/restore/service_action.go               |  69 +++++++++
 pkg/restore/service_action_test.go          | 161 +++++++++++++++++++-
 site/content/docs/main/restore-reference.md |  16 +-
 4 files changed, 239 insertions(+), 8 deletions(-)
 create mode 100644 changelogs/unreleased/7026-blackpiglet

diff --git a/changelogs/unreleased/7026-blackpiglet b/changelogs/unreleased/7026-blackpiglet
new file mode 100644
index 0000000000..0decfabf74
--- /dev/null
+++ b/changelogs/unreleased/7026-blackpiglet
@@ -0,0 +1 @@
+Add HealthCheckNodePort deletion logic for Service restore.
\ No newline at end of file
diff --git a/pkg/restore/service_action.go b/pkg/restore/service_action.go
index 2dbac3a894..6fc1b2cb41 100644
--- a/pkg/restore/service_action.go
+++ b/pkg/restore/service_action.go
@@ -66,6 +66,9 @@ func (a *ServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*v
 		if err := deleteNodePorts(service); err != nil {
 			return nil, err
 		}
+		if err := deleteHealthCheckNodePort(service); err != nil {
+			return nil, err
+		}
 	}

 	res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(service)
@@ -76,6 +79,72 @@ func (a *ServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*v
 	return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil
 }

+func deleteHealthCheckNodePort(service *corev1api.Service) error {
+	// Check the Service's type and external traffic policy; if the
+	// combination cannot carry a HealthCheckNodePort, return early.
+	if service.Spec.ExternalTrafficPolicy != corev1api.ServiceExternalTrafficPolicyTypeLocal ||
+		service.Spec.Type != corev1api.ServiceTypeLoadBalancer {
+		return nil
+	}
+
+	// HealthCheckNodePort is already 0; there is nothing to delete.
+	if service.Spec.HealthCheckNodePort == 0 {
+		return nil
+	}
+
+	// Search for HealthCheckNodePort in the server's last-applied-configuration
+	// annotation (HealthCheckNodePort is specified by the `kubectl apply` command).
+	lastAppliedConfig, ok := service.Annotations[annotationLastAppliedConfig]
+	if ok {
+		appliedServiceUnstructured := new(map[string]interface{})
+		if err := json.Unmarshal([]byte(lastAppliedConfig), appliedServiceUnstructured); err != nil {
+			return errors.WithStack(err)
+		}
+
+		healthCheckNodePort, exist, err := unstructured.NestedFloat64(*appliedServiceUnstructured, "spec", "healthCheckNodePort")
+		if err != nil {
+			return errors.WithStack(err)
+		}
+
+		// Found a non-zero healthCheckNodePort in the last-applied-configuration
+		// annotation: it was explicitly specified, so do not delete it.
+		if exist && healthCheckNodePort != 0 {
+			return nil
+		}
+	}
+
+	// Search for HealthCheckNodePort in ManagedFields (HealthCheckNodePort
+	// is specified by the `kubectl apply --server-side` command).
+	for _, entry := range service.GetManagedFields() {
+		if entry.FieldsV1 == nil {
+			continue
+		}
+		fields := new(map[string]interface{})
+		if err := json.Unmarshal(entry.FieldsV1.Raw, fields); err != nil {
+			return errors.WithStack(err)
+		}
+
+		_, exist, err := unstructured.NestedMap(*fields, "f:spec", "f:healthCheckNodePort")
+		if err != nil {
+			return errors.WithStack(err)
+		}
+		if !exist {
+			continue
+		}
+		// Because the format in ManagedFields is `f:healthCheckNodePort: {}`,
+		// the value cannot be read; checking for existence is enough.
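+		// For illustration, a server-side-applied entry's FieldsV1 raw JSON is
+		// shaped like `{"f:spec":{"f:healthCheckNodePort":{}}}` (the same shape
+		// the test data below uses): leaf values are empty objects, so only the
+		// key's presence can be observed.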
+		// Found healthCheckNodePort in ManagedFields:
+		// it was explicitly specified, so do not delete it.
+		return nil
+	}
+
+	// HealthCheckNodePort was found in neither the annotation nor
+	// ManagedFields, which means it was auto-assigned. Delete it.
+	service.Spec.HealthCheckNodePort = 0
+
+	return nil
+}
+
 func deleteNodePorts(service *corev1api.Service) error {
 	if service.Spec.Type == corev1api.ServiceTypeExternalName {
 		return nil
diff --git a/pkg/restore/service_action_test.go b/pkg/restore/service_action_test.go
index d80cc1c450..be722fa2a0 100644
--- a/pkg/restore/service_action_test.go
+++ b/pkg/restore/service_action_test.go
@@ -36,7 +36,8 @@ import (
 func svcJSON(ports ...corev1api.ServicePort) string {
 	svc := corev1api.Service{
 		Spec: corev1api.ServiceSpec{
-			Ports: ports,
+			HealthCheckNodePort: 8080,
+			Ports:               ports,
 		},
 	}

@@ -486,6 +487,164 @@ func TestServiceActionExecute(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "If PreserveNodePorts is true in the restore spec, then HealthCheckNodePort is always preserved.",
+			obj: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "svc-1",
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+					Ports: []corev1api.ServicePort{
+						{
+							Name:     "http",
+							Port:     80,
+							NodePort: 8080,
+						},
+						{
+							Name:     "hepsiburada",
+							NodePort: 9025,
+						},
+					},
+				},
+			},
+			restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(true).Result(),
+			expectedRes: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "svc-1",
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+					Ports: []corev1api.ServicePort{
+						{
+							Name:     "http",
+							Port:     80,
+							NodePort: 8080,
+						},
+						{
+							Name:     "hepsiburada",
+							NodePort: 9025,
+						},
+					},
+				},
+			},
+		},
+		{
+			name: "If PreserveNodePorts is false in the restore spec, then HealthCheckNodePort should be cleaned.",
+			obj: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "svc-1",
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+				},
+			},
+			restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(),
+			expectedRes: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "svc-1",
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   0,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+				},
+			},
+		},
+		{
+			name: "If PreserveNodePorts is false in the restore spec, but the Service is not a LoadBalancer with Local external traffic policy, then HealthCheckNodePort should be kept.",
+			obj: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "svc-1",
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeCluster,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+				},
+			},
+			restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(),
+			expectedRes: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "svc-1",
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeCluster,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+				},
+			},
+		},
+		{
+			name: "If PreserveNodePorts is false in the restore spec, but HealthCheckNodePort can be found in the annotation, then it should be kept.",
+			obj: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "svc-1",
+					Annotations: map[string]string{annotationLastAppliedConfig: svcJSON()},
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+				},
+			},
+			restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(),
+			expectedRes: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "svc-1",
+					Annotations: map[string]string{annotationLastAppliedConfig: svcJSON()},
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+				},
+			},
+		},
+		{
+			name: "If PreserveNodePorts is false in the restore spec, but HealthCheckNodePort can be found in ManagedFields, then it should be kept.",
+			obj: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "svc-1",
+					ManagedFields: []metav1.ManagedFieldsEntry{
+						{
+							FieldsV1: &metav1.FieldsV1{
+								Raw: []byte(`{"f:spec":{"f:healthCheckNodePort":{}}}`),
+							},
+						},
+					},
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+				},
+			},
+			restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(),
+			expectedRes: corev1api.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "svc-1",
+					ManagedFields: []metav1.ManagedFieldsEntry{
+						{
+							FieldsV1: &metav1.FieldsV1{
+								Raw: []byte(`{"f:spec":{"f:healthCheckNodePort":{}}}`),
+							},
+						},
+					},
+				},
+				Spec: corev1api.ServiceSpec{
+					HealthCheckNodePort:   8080,
+					ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal,
+					Type:                  corev1api.ServiceTypeLoadBalancer,
+				},
+			},
+		},
 	}

 	for _, test := range tests {
diff --git a/site/content/docs/main/restore-reference.md b/site/content/docs/main/restore-reference.md
index d50a12a43f..2f5a189125 100644
--- a/site/content/docs/main/restore-reference.md
+++ b/site/content/docs/main/restore-reference.md
@@ -280,21 +280,23 @@ There are two ways to delete a Restore object:
 1. Deleting with `velero restore delete` will delete the Custom Resource representing the restore, along with its individual log and results files. It will not delete any objects that were created by the restore in your cluster.
 2. Deleting with `kubectl -n velero delete restore` will delete the Custom Resource representing the restore. It will not delete restore log or results files from object storage, or any objects that were created during the restore in your cluster.

-## What happens to NodePorts when restoring Services
+## What happens to NodePorts and HealthCheckNodePort when restoring Services

-During a restore, Velero deletes **Auto assigned** NodePorts by default and Services get new **auto assigned** nodePorts after restore.
+During a restore, Velero deletes **auto-assigned** nodePorts and healthCheckNodePort by default, and Services get new **auto-assigned** values after restore.

-Velero auto detects **explicitly specified** NodePorts using **`last-applied-config`** annotation and they are **preserved** after restore. NodePorts can be explicitly specified as `.spec.ports[*].nodePort` field on Service definition.
+Velero auto-detects **explicitly specified** nodePorts using the **`last-applied-config`** annotation and **`managedFields`**, and they are **preserved** after restore. NodePorts can be explicitly specified in the `.spec.ports[*].nodePort` field of the Service definition.
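+
+For example, when `healthCheckNodePort` is set with `kubectl apply`, the applied value is recorded in the Service's `kubectl.kubernetes.io/last-applied-configuration` annotation. A trimmed, illustrative annotation value (not a complete manifest) might look like:
+
+```json
+{"apiVersion":"v1","kind":"Service","spec":{"type":"LoadBalancer","externalTrafficPolicy":"Local","healthCheckNodePort":8080}}
+```
+
+With `kubectl apply --server-side`, no such annotation is written; the field instead appears in `metadata.managedFields` as an owned key (`f:healthCheckNodePort: {}`), so Velero can only check for its presence, not its value.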

-### Always Preserve NodePorts
+Velero applies the same logic to `healthCheckNodePort` as it does to nodePorts.

-It is not always possible to set nodePorts explicitly on some big clusters because of operational complexity. As the Kubernetes [NodePort documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) states, "if you want a specific port number, you can specify a value in the `nodePort` field. The control plane will either allocate you that port or report that the API transaction failed. This means that you need to take care of possible port collisions yourself. You also have to use a valid port number, one that's inside the range configured for NodePort use.""
+### Always Preserve NodePorts and HealthCheckNodePort
+
+It is not always possible to set nodePorts and healthCheckNodePort explicitly on some big clusters because of operational complexity. As the Kubernetes [NodePort documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) states, "if you want a specific port number, you can specify a value in the `nodePort` field. The control plane will either allocate you that port or report that the API transaction failed. This means that you need to take care of possible port collisions yourself. You also have to use a valid port number, one that's inside the range configured for NodePort use."

 The clusters which are not explicitly specifying nodePorts may still need to restore original NodePorts in the event of a disaster. Auto assigned nodePorts are typically defined on Load Balancers located in front of cluster. Changing all these nodePorts on Load Balancers is another operation complexity you are responsible for updating after disaster if nodePorts are changed.

-Use the `velero restore create ` command's `--preserve-nodeports` flag to preserve Service nodePorts always, regardless of whether nodePorts are explicitly specified or not. This flag is used for preserving the original nodePorts from a backup and can be used as `--preserve-nodeports` or `--preserve-nodeports=true`. If this flag is present, Velero will not remove the nodePorts when restoring a Service, but will try to use the nodePorts from the backup.
+Use the `velero restore create` command's `--preserve-nodeports` flag to always preserve Service nodePorts and healthCheckNodePort, regardless of whether they are explicitly specified. This flag preserves the original nodePorts and healthCheckNodePort from the backup and can be used as `--preserve-nodeports` or `--preserve-nodeports=true`. If this flag is present, Velero will not remove the nodePorts and healthCheckNodePort when restoring a Service, but will try to use the values from the backup.

-Trying to preserve nodePorts may cause port conflicts when restoring on situations below:
+Trying to preserve nodePorts and healthCheckNodePort may cause port conflicts when restoring in the situations below:

- If the nodePort from the backup is already allocated on the target cluster then Velero prints error log as shown below and continues the restore operation.
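
For reference, a minimal usage sketch of the `--preserve-nodeports` flag documented above; the backup name `my-backup` is hypothetical:

```bash
velero restore create --from-backup my-backup --preserve-nodeports
```

With the flag set, restored Services keep the nodePort and healthCheckNodePort values recorded in the backup instead of receiving newly auto-assigned ones.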