From b049fc412ec4441be76373118278d8080ad4407e Mon Sep 17 00:00:00 2001
From: Hiromu Asahina
Date: Fri, 7 Apr 2023 22:27:41 +0900
Subject: [PATCH] Add NodeOutdatedRevisionTaint to Nodes during rolling updates

---
 api/v1beta1/common_types.go                    |   8 +
 .../controllers/machine/machine_controller.go  |   8 +
 .../machine/machine_controller_noderef.go      |  51 +++-
 .../machine_controller_noderef_test.go         | 218 +++++++++++++++++-
 internal/util/taints/taints.go                 |  10 +
 test/e2e/data/test-extension/deployment.yaml   | 138 +++++++++++
 util/util.go                                   |  45 ++++
 util/util_test.go                              |  64 +++++
 8 files changed, 538 insertions(+), 4 deletions(-)
 create mode 100644 test/e2e/data/test-extension/deployment.yaml

diff --git a/api/v1beta1/common_types.go b/api/v1beta1/common_types.go
index 6edc7e5bed48..77b38056b69a 100644
--- a/api/v1beta1/common_types.go
+++ b/api/v1beta1/common_types.go
@@ -217,6 +217,14 @@ const (
 	MachineSetPreflightCheckControlPlaneIsStable MachineSetPreflightCheck = "ControlPlaneIsStable"
 )
 
+// NodeOutdatedRevisionTaint can be added to Nodes during rolling updates, typically triggered by updating a MachineDeployment.
+// This taint is used to prevent unnecessary pod churn, i.e., as the first node is drained, pods previously running on
+// that node are scheduled onto nodes that have yet to be replaced, but will be torn down soon.
+var NodeOutdatedRevisionTaint = corev1.Taint{
+	Key:    "node.cluster.x-k8s.io/outdated-revision",
+	Effect: corev1.TaintEffectPreferNoSchedule,
+}
+
 // NodeUninitializedTaint can be added to Nodes at creation by the bootstrap provider, e.g. the
 // KubeadmBootstrap provider will add the taint.
 // This taint is used to prevent workloads to be scheduled on Nodes before the node is initialized by Cluster API.
diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go
index 7ffe710dff5c..afce7bde528d 100644
--- a/internal/controllers/machine/machine_controller.go
+++ b/internal/controllers/machine/machine_controller.go
@@ -100,6 +100,10 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 	if err != nil {
 		return err
 	}
+	msToMachines, err := util.MachineSetToObjectsMapper(mgr.GetClient(), &clusterv1.MachineList{}, mgr.GetScheme())
+	if err != nil {
+		return err
+	}
 
 	if r.nodeDeletionRetryTimeout.Nanoseconds() == 0 {
 		r.nodeDeletionRetryTimeout = 10 * time.Second
@@ -122,6 +126,10 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 				predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue),
 			),
 		)).
+		Watches(
+			&clusterv1.MachineSet{},
+			handler.EnqueueRequestsFromMapFunc(msToMachines),
+		).
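+		// Watching MachineSets lets the Machine controller re-reconcile Machines when a MachineSet's
+		// revision changes, so the NodeOutdatedRevisionTaint can be added to or removed from their Nodes promptly.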
 		Build(r)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go
index 7f32c72c48c5..c28e13c4062c 100644
--- a/internal/controllers/machine/machine_controller_noderef.go
+++ b/internal/controllers/machine/machine_controller_noderef.go
@@ -31,6 +31,7 @@ import (
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/api/v1beta1/index"
+	"sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil"
 	"sigs.k8s.io/cluster-api/internal/util/taints"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
@@ -133,7 +134,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result,
 	_, nodeHadInterruptibleLabel := node.Labels[clusterv1.InterruptibleLabel]
 
 	// Reconcile node taints
-	if err := r.patchNode(ctx, remoteClient, node, nodeLabels, nodeAnnotations); err != nil {
+	if err := r.patchNode(ctx, remoteClient, node, nodeLabels, nodeAnnotations, machine); err != nil {
 		return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile Node %s", klog.KObj(node))
 	}
 	if !nodeHadInterruptibleLabel && interruptible {
@@ -255,7 +256,9 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID st
 
 // PatchNode is required to workaround an issue on Node.Status.Address which is incorrectly annotated as patchStrategy=merge
 // and this causes SSA patch to fail in case there are two addresses with the same key https://github.com/kubernetes-sigs/cluster-api/issues/8417
-func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, node *corev1.Node, newLabels, newAnnotations map[string]string) error {
+func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, node *corev1.Node, newLabels, newAnnotations map[string]string, m *clusterv1.Machine) error {
+	log := ctrl.LoggerFrom(ctx)
+
 	newNode := node.DeepCopy()
 
 	// Adds the annotations CAPI sets on the node.
@@ -292,9 +295,53 @@ func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client,
 	// Drop the NodeUninitializedTaint taint on the node given that we are reconciling labels.
 	hasTaintChanges := taints.RemoveNodeTaint(newNode, clusterv1.NodeUninitializedTaint)
 
+	// Set the NodeOutdatedRevisionTaint on the Node if its Machine belongs to an outdated MachineSet of a
+	// MachineDeployment, and remove the taint otherwise.
+	// Errors from this check are only logged, as they are not critical for the reconcile; to avoid the taint
+	// lingering unnecessarily after an error, the taint is removed whenever the check does not report outdated.
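+	// A Machine's MachineSet is considered outdated when the revision recorded on the MachineSet
+	// (via its revision annotation) is lower than the revision recorded on the owning MachineDeployment;
+	// see isNodeOutdated below.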
+ isOutdated, err := isNodeOutdated(ctx, r.Client, m) + if err != nil { + log.V(2).Info("Failed to check if Node %s is outdated", "err", err, "node name", node.Name) + } + if isOutdated { + hasTaintChanges = taints.EnsureNodeTaint(newNode, clusterv1.NodeOutdatedRevisionTaint) || hasTaintChanges + } else { + hasTaintChanges = taints.RemoveNodeTaint(newNode, clusterv1.NodeOutdatedRevisionTaint) || hasTaintChanges + } + if !hasAnnotationChanges && !hasLabelChanges && !hasTaintChanges { return nil } return remoteClient.Patch(ctx, newNode, client.StrategicMergeFrom(node)) } + +func isNodeOutdated(ctx context.Context, c client.Client, m *clusterv1.Machine) (bool, error) { + ms := &clusterv1.MachineSet{} + objKey := client.ObjectKey{ + Namespace: m.ObjectMeta.Namespace, + Name: m.Labels[clusterv1.MachineSetNameLabel], + } + if err := c.Get(ctx, objKey, ms); err != nil { + return false, err + } + md := &clusterv1.MachineDeployment{} + objKey = client.ObjectKey{ + Namespace: m.ObjectMeta.Namespace, + Name: m.Labels[clusterv1.MachineDeploymentNameLabel], + } + if err := c.Get(ctx, objKey, md); err != nil { + return false, err + } + msRev, err := mdutil.Revision(ms) + if err != nil { + return false, err + } + mdRev, err := mdutil.Revision(md) + if err != nil { + return false, err + } + if msRev < mdRev { + return true, nil + } + return false, nil +} diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go index 0bcaaa1d060d..778beab666c0 100644 --- a/internal/controllers/machine/machine_controller_noderef_test.go +++ b/internal/controllers/machine/machine_controller_noderef_test.go @@ -515,6 +515,8 @@ func TestGetManagedLabels(t *testing.T) { } func TestPatchNode(t *testing.T) { + clusterName := "test-cluster" + testCases := []struct { name string oldNode *corev1.Node @@ -523,6 +525,9 @@ func TestPatchNode(t *testing.T) { expectedLabels map[string]string expectedAnnotations map[string]string expectedTaints []corev1.Taint + machine *clusterv1.Machine + ms *clusterv1.MachineSet + md *clusterv1.MachineDeployment }{ { name: "Check that patch works even if there are Status.Addresses with the same key", @@ -551,6 +556,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, // Labels (CAPI owns a subset of labels, everything else should be preserved) { @@ -569,6 +577,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "Add label must preserve existing labels", @@ -593,6 +604,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "CAPI takes ownership of existing labels if they are set from 
machines", @@ -616,6 +630,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "change a label previously set from machines", @@ -642,6 +659,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "Delete a label previously set from machines", @@ -666,6 +686,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "Label previously set from machine, already removed out of band, annotation should be cleaned up", @@ -683,6 +706,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, // Add annotations (CAPI only enforces some annotations and never changes or removes them) { @@ -710,6 +736,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, // Taint (CAPI only remove one taint if it exists, other taints should be preserved) { @@ -738,6 +767,125 @@ func TestPatchNode(t *testing.T) { }, {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), + }, + { + name: "Ensure NodeOutdatedRevisionTaint to be set if a node is associated to an outdated machineset", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("node-%s", util.RandomString(6)), + }, + }, + expectedAnnotations: map[string]string{ + clusterv1.LabelsFromMachineAnnotation: "", + }, + expectedTaints: []corev1.Taint{ + {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server + clusterv1.NodeOutdatedRevisionTaint, + }, + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ma-%s", util.RandomString(6)), + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: "test-ms-outdated", + clusterv1.MachineDeploymentNameLabel: "test-md-outdated", + }, + }, + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + ms: &clusterv1.MachineSet{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-ms-outdated", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "1", + }, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + }, + }, + md: &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-md-outdated", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + }, + }, + }, + { + name: "Removes NodeOutdatedRevisionTaint if a node is associated to a non-outdated machineset", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("node-%s", util.RandomString(6)), + }, + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + clusterv1.NodeOutdatedRevisionTaint, + }, + }, + }, + expectedAnnotations: map[string]string{ + clusterv1.LabelsFromMachineAnnotation: "", + }, + expectedTaints: []corev1.Taint{ + {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server + }, + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ma-%s", util.RandomString(6)), + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: "test-ms-not-outdated", + clusterv1.MachineDeploymentNameLabel: "test-md-not-outdated", + }, + }, + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + ms: &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ms-not-outdated", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "3", + }, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + }, + }, + md: &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-md-not-outdated", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + }, + }, }, } @@ -749,13 +897,19 @@ func TestPatchNode(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) oldNode := tc.oldNode.DeepCopy() + machine := tc.machine.DeepCopy() + ms := tc.ms.DeepCopy() + md := tc.md.DeepCopy() g.Expect(env.Create(ctx, oldNode)).To(Succeed()) + g.Expect(env.Create(ctx, machine)).To(Succeed()) + g.Expect(env.Create(ctx, ms)).To(Succeed()) + g.Expect(env.Create(ctx, md)).To(Succeed()) t.Cleanup(func() { - _ = env.Cleanup(ctx, oldNode) + _ = env.Cleanup(ctx, oldNode, machine, ms, md) }) - err := r.patchNode(ctx, env, oldNode, tc.newLabels, tc.newAnnotations) + err := r.patchNode(ctx, env, oldNode, tc.newLabels, tc.newAnnotations, tc.machine) g.Expect(err).ToNot(HaveOccurred()) g.Eventually(func(g Gomega) { @@ -770,3 +924,63 @@ func TestPatchNode(t *testing.T) { }) } } + +func newFakeMachineSpec(namespace, clusterName string) clusterv1.MachineSpec { + return clusterv1.MachineSpec{ + ClusterName: clusterName, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + 
APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha3", + Kind: "KubeadmConfigTemplate", + Name: fmt.Sprintf("%s-md-0", clusterName), + Namespace: namespace, + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3", + Kind: "FakeMachineTemplate", + Name: fmt.Sprintf("%s-md-0", clusterName), + Namespace: namespace, + }, + } +} + +func newFakeMachine(namespace, clusterName string) *clusterv1.Machine { + return &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ma-%s", util.RandomString(6)), + Namespace: namespace, + }, + Spec: newFakeMachineSpec(namespace, clusterName), + } +} + +func newFakeMachineSet(namespace, clusterName string) *clusterv1.MachineSet { + return &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ms-%s", util.RandomString(6)), + Namespace: namespace, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(namespace, clusterName), + }, + }, + } +} + +func newFakeMachineDeployment(namespace, clusterName string) *clusterv1.MachineDeployment { + return &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("md-%s", util.RandomString(6)), + Namespace: namespace, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(namespace, clusterName), + }, + }, + } +} diff --git a/internal/util/taints/taints.go b/internal/util/taints/taints.go index 0e88cfdb046a..3bda0e5ec4b9 100644 --- a/internal/util/taints/taints.go +++ b/internal/util/taints/taints.go @@ -46,3 +46,13 @@ func HasTaint(taints []corev1.Taint, targetTaint corev1.Taint) bool { } return false } + +// EnsureNodeTaint makes sure the node has the Taint. +// It returns true if the taints are modified, false otherwise. 
+func EnsureNodeTaint(node *corev1.Node, taint corev1.Taint) bool { + if !HasTaint(node.Spec.Taints, taint) { + node.Spec.Taints = append(node.Spec.Taints, taint) + return true + } + return false +} diff --git a/test/e2e/data/test-extension/deployment.yaml b/test/e2e/data/test-extension/deployment.yaml new file mode 100644 index 000000000000..8c4846a53d51 --- /dev/null +++ b/test/e2e/data/test-extension/deployment.yaml @@ -0,0 +1,138 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + cluster.x-k8s.io/provider: runtime-extension-test + name: test-extension-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + cluster.x-k8s.io/provider: runtime-extension-test + name: test-extension-manager + namespace: test-extension-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + cluster.x-k8s.io/provider: runtime-extension-test + name: test-extension-manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - patch + - update + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + cluster.x-k8s.io/provider: runtime-extension-test + name: test-extension-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: test-extension-manager-role +subjects: +- kind: ServiceAccount + name: test-extension-manager + namespace: test-extension-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + cluster.x-k8s.io/provider: runtime-extension-test + name: test-extension-webhook-service + namespace: test-extension-system +spec: + ports: + - port: 443 + targetPort: webhook-server + selector: + app: test-extension-manager + cluster.x-k8s.io/provider: runtime-extension-test +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + cluster.x-k8s.io/provider: runtime-extension-test + name: test-extension-manager + namespace: test-extension-system +spec: + replicas: 1 + selector: + matchLabels: + app: test-extension-manager + cluster.x-k8s.io/provider: runtime-extension-test + template: + metadata: + labels: + app: test-extension-manager + cluster.x-k8s.io/provider: runtime-extension-test + spec: + containers: + - command: + - /manager + image: gcr.io/k8s-staging-cluster-api/test-extension-amd64:dev + imagePullPolicy: IfNotPresent + name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + serviceAccountName: test-extension-manager + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + volumes: + - name: cert + secret: + secretName: test-extension-webhook-service-cert +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + cluster.x-k8s.io/provider: runtime-extension-test + name: test-extension-serving-cert + namespace: test-extension-system +spec: + dnsNames: + - test-extension-webhook-service.test-extension-system.svc + - test-extension-webhook-service.test-extension-system.svc.cluster.local + - localhost + issuerRef: + kind: Issuer + name: test-extension-selfsigned-issuer + secretName: test-extension-webhook-service-cert + subject: + organizations: + - k8s-sig-cluster-lifecycle +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + cluster.x-k8s.io/provider: runtime-extension-test + name: 
test-extension-selfsigned-issuer
+  namespace: test-extension-system
+spec:
+  selfSigned: {}
diff --git a/util/util.go b/util/util.go
index 93267f160e60..1f9ff7fad1c4 100644
--- a/util/util.go
+++ b/util/util.go
@@ -583,6 +583,51 @@ func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme *
 	}, nil
 }
 
+// MachineSetToObjectsMapper returns a mapper function that, for a given MachineSet, lists all objects of the
+// passed-in type carrying the MachineSet's name label and returns a request for each of them.
+func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) {
+	gvk, err := apiutil.GVKForObject(ro, scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper())
+	if err != nil {
+		return nil, err
+	}
+
+	return func(ctx context.Context, o client.Object) []ctrl.Request {
+		ms, ok := o.(*clusterv1.MachineSet)
+		if !ok {
+			return nil
+		}
+
+		listOpts := []client.ListOption{
+			client.MatchingLabels{
+				clusterv1.MachineSetNameLabel: ms.Name,
+			},
+		}
+
+		if isNamespaced {
+			listOpts = append(listOpts, client.InNamespace(ms.Namespace))
+		}
+
+		list := &unstructured.UnstructuredList{}
+		list.SetGroupVersionKind(gvk)
+		if err := c.List(ctx, list, listOpts...); err != nil {
+			return nil
+		}
+
+		results := []ctrl.Request{}
+		for _, obj := range list.Items {
+			results = append(results, ctrl.Request{
+				NamespacedName: client.ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()},
+			})
+		}
+		return results
+	}, nil
+}
+
 // isAPINamespaced detects if a GroupVersionKind is namespaced.
 func isAPINamespaced(gk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) {
 	restMapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind})
diff --git a/util/util_test.go b/util/util_test.go
index c736c02b6eb1..b7d7f62f80f5 100644
--- a/util/util_test.go
+++ b/util/util_test.go
@@ -745,6 +745,70 @@ func TestClusterToObjectsMapper(t *testing.T) {
 }
 
+func TestMachineSetToObjectsMapper(t *testing.T) {
+	g := NewWithT(t)
+
+	machineSet := &clusterv1.MachineSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "cluster-md-0-jgbsb-5684b789c9",
+		},
+	}
+
+	table := []struct {
+		name        string
+		objects     []client.Object
+		input       client.ObjectList
+		output      []ctrl.Request
+		expectError bool
+	}{
+		{
+			name:  "should return a list of requests with labelled machines",
+			input: &clusterv1.MachineList{},
+			objects: []client.Object{
+				&clusterv1.Machine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "machine1",
+						Labels: map[string]string{
+							clusterv1.MachineSetNameLabel: "cluster-md-0-jgbsb-5684b789c9",
+						},
+					},
+				},
+				&clusterv1.Machine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "machine2",
+						Labels: map[string]string{
+							clusterv1.MachineSetNameLabel: "cluster-md-0-jgbsb-5684b789c9",
+						},
+					},
+				},
+			},
+			output: []ctrl.Request{
+				{NamespacedName: client.ObjectKey{Name: "machine1"}},
+				{NamespacedName: client.ObjectKey{Name: "machine2"}},
+			},
+		},
+	}
+
+	for _, tc := range table {
+		tc.objects = append(tc.objects, machineSet)
+
+		scheme := runtime.NewScheme()
+		_ = clusterv1.AddToScheme(scheme)
+
+		restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{clusterv1.GroupVersion})
+
+		// Add tc.input gvk to the restMapper.
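+		// The fake client's RESTMapper needs this mapping so that the mapper's namespaced-scope lookup
+		// (isAPINamespaced) can resolve the input type.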
+		gvk, err := apiutil.GVKForObject(tc.input, scheme)
+		g.Expect(err).ToNot(HaveOccurred())
+		restMapper.Add(gvk, meta.RESTScopeNamespace)
+
+		client := fake.NewClientBuilder().WithObjects(tc.objects...).WithRESTMapper(restMapper).Build()
+		f, err := MachineSetToObjectsMapper(client, tc.input, scheme)
+		g.Expect(err != nil, err).To(Equal(tc.expectError))
+		g.Expect(f(ctx, machineSet)).To(ConsistOf(tc.output))
+	}
+}
+
 func TestOrdinalize(t *testing.T) {
 	tests := []struct {
 		input int
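
For illustration only, here is a minimal standalone sketch of the revision comparison this patch performs in isNodeOutdated: mdutil.Revision parses the revision annotation on the MachineSet and on its owning MachineDeployment, and the Node is tainted when the MachineSet revision is lower. The helper names below (outdated, main) are hypothetical and not part of the patch.

    package main

    import (
    	"fmt"
    	"strconv"
    )

    // outdated reports whether a MachineSet revision is older than the revision of its
    // owning MachineDeployment. In the patch this comparison is done by isNodeOutdated
    // via mdutil.Revision, which reads the revision annotation from both objects; here
    // the annotation values are passed in directly.
    func outdated(msRevision, mdRevision string) (bool, error) {
    	msRev, err := strconv.ParseInt(msRevision, 10, 64)
    	if err != nil {
    		return false, err
    	}
    	mdRev, err := strconv.ParseInt(mdRevision, 10, 64)
    	if err != nil {
    		return false, err
    	}
    	return msRev < mdRev, nil
    }

    func main() {
    	// A MachineSet at revision "1" under a MachineDeployment at revision "2" is outdated,
    	// so the Node backing its Machine would receive the PreferNoSchedule taint.
    	isOutdated, err := outdated("1", "2")
    	fmt.Println(isOutdated, err) // true <nil>
    }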