From 09ae5c677d899ab510804d92d6b55eedc3580512 Mon Sep 17 00:00:00 2001
From: Lee Bernick
Date: Wed, 26 Apr 2023 12:10:13 -0400
Subject: [PATCH] Handle affinity assistant deadlock for node maintenance

Prior to this commit, cordoning a node for maintenance could deadlock
the affinity assistant: if the placeholder pod is scheduled to a node
that is then marked unschedulable, new TaskRun pods can neither
schedule nor trigger cluster autoscaler scale-up, because they have
inter-pod affinity for the placeholder pod on the unschedulable node.

This commit adds a new controller that watches Nodes. When a node
becomes unschedulable, the controller deletes any affinity assistant
pods running on it (but leaves TaskRun pods in place). The affinity
assistant statefulset then recreates the placeholder pod, which is
scheduled to an available node (or triggers scale-up if there's a
volume node affinity conflict). Existing TaskRun pods cannot be
scheduled until the placeholder pod is rescheduled.

This commit only handles nodes that are marked unschedulable. It
doesn't handle nodes that run out of resources or reach their cap on
the number of pods.

Tested locally, it appears to work.
---
 cmd/controller/main.go                        |   2 +
 config/200-clusterrole.yaml                   |   3 +
 pkg/reconciler/node/controller.go             |  41 ++
 pkg/reconciler/node/node.go                   |  53 +++
 .../kube/informers/core/v1/node/node.go       | 110 +++++
 .../reconciler/core/v1/node/controller.go     | 160 +++++++
 .../reconciler/core/v1/node/reconciler.go     | 431 ++++++++++++++++++
 .../kube/reconciler/core/v1/node/state.go     |  97 ++++
 vendor/modules.txt                            |   2 +
 9 files changed, 899 insertions(+)
 create mode 100644 pkg/reconciler/node/controller.go
 create mode 100644 pkg/reconciler/node/node.go
 create mode 100644 vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/node/node.go
 create mode 100644 vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/controller.go
 create mode 100644 vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/reconciler.go
 create mode 100644 vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/state.go

diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index aa80e20387d..9beec3af631 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -27,6 +27,7 @@ import (
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
 	"github.com/tektoncd/pipeline/pkg/reconciler/customrun"
+	"github.com/tektoncd/pipeline/pkg/reconciler/node"
 	"github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun"
 	"github.com/tektoncd/pipeline/pkg/reconciler/resolutionrequest"
 	"github.com/tektoncd/pipeline/pkg/reconciler/taskrun"
@@ -132,6 +133,7 @@ func main() {
 		pipelinerun.NewController(opts, clock.RealClock{}, tpPipelineRun),
 		resolutionrequest.NewController(clock.RealClock{}),
 		customrun.NewController(),
+		node.NewController(),
 	)

 	// Cleanly shutdown and flush telemetry when the application exits.
diff --git a/config/200-clusterrole.yaml b/config/200-clusterrole.yaml
index a6b22678320..004ce8310ab 100644
--- a/config/200-clusterrole.yaml
+++ b/config/200-clusterrole.yaml
@@ -25,6 +25,9 @@ rules:
     # Controller needs to watch Pods created by TaskRuns to see them progress.
     resources: ["pods"]
    verbs: ["list", "watch"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["list", "watch"]
   # Controller needs cluster access to all of the CRDs that it is responsible for
   # managing.
- apiGroups: ["tekton.dev"] diff --git a/pkg/reconciler/node/controller.go b/pkg/reconciler/node/controller.go new file mode 100644 index 00000000000..5f7eeecb821 --- /dev/null +++ b/pkg/reconciler/node/controller.go @@ -0,0 +1,41 @@ +package node + +import ( + "context" + + "github.com/tektoncd/pipeline/pkg/apis/config" + + "github.com/tektoncd/pipeline/pkg/pipelinerunmetrics" + kubeclient "knative.dev/pkg/client/injection/kube/client" + nodeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/node" + nodereconciler "knative.dev/pkg/client/injection/kube/reconciler/core/v1/node" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" +) + +// NewController instantiates a new controller.Impl from knative.dev/pkg/controller +func NewController() func(context.Context, configmap.Watcher) *controller.Impl { + return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + logger := logging.FromContext(ctx) + kubeclientset := kubeclient.Get(ctx) + nodeInformer := nodeinformer.Get(ctx) + configStore := config.NewStore(logger.Named("config-store"), pipelinerunmetrics.MetricsOnStore(logger)) + configStore.WatchConfigs(cmw) + + c := &Reconciler{ + KubeClientSet: kubeclientset, + } + + impl := nodereconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options { + return controller.Options{ + AgentName: "pod", + SkipStatusUpdates: true, + ConfigStore: configStore, + } + }) + + nodeInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + return impl + } +} diff --git a/pkg/reconciler/node/node.go b/pkg/reconciler/node/node.go new file mode 100644 index 00000000000..340e5bebbcc --- /dev/null +++ b/pkg/reconciler/node/node.go @@ -0,0 +1,53 @@ +package node + +import ( + "context" + "fmt" + + "github.com/tektoncd/pipeline/pkg/workspace" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + nodereconciler "knative.dev/pkg/client/injection/kube/reconciler/core/v1/node" + "knative.dev/pkg/logging" + pkgreconciler "knative.dev/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for Configuration resources. +type Reconciler struct { + KubeClientSet kubernetes.Interface +} + +// Check that our Reconciler implements noderunreconciler.Interface +var ( + _ nodereconciler.Interface = (*Reconciler)(nil) +) + +// ReconcileKind deletes any affinity assistant pods on unschedulable nodes +func (c *Reconciler) ReconcileKind(ctx context.Context, n *corev1.Node) pkgreconciler.Event { + if !n.Spec.Unschedulable { + return nil + } + logger := logging.FromContext(ctx) + logger.Debugf("found unschedulable node %s", n.Name) + + // Find all affinity assistant pods on the node + pods, err := c.KubeClientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{ + LabelSelector: workspace.LabelComponent + "=" + workspace.ComponentNameAffinityAssistant, + FieldSelector: "spec.nodeName=" + n.Name, + }) + if err != nil { + return fmt.Errorf("couldn't list affinity assistant pods on node %s: %s", n.Name, err) + } + + // Delete affinity assistant pods, allowing their statefulset controllers to recreate them. 
+	// Pods are deleted individually, since the API server rejects DeleteCollection
+	// for this request.
+	for _, pod := range pods.Items {
+		err = c.KubeClientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
+		if err != nil {
+			return fmt.Errorf("error deleting affinity assistant pod %s in ns %s: %w", pod.Name, pod.Namespace, err)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/node/node.go b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/node/node.go
new file mode 100644
index 00000000000..5fc46d77613
--- /dev/null
+++ b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/node/node.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2022 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package node
+
+import (
+	context "context"
+
+	apicorev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	v1 "k8s.io/client-go/informers/core/v1"
+	kubernetes "k8s.io/client-go/kubernetes"
+	corev1 "k8s.io/client-go/listers/core/v1"
+	cache "k8s.io/client-go/tools/cache"
+	client "knative.dev/pkg/client/injection/kube/client"
+	factory "knative.dev/pkg/client/injection/kube/informers/factory"
+	controller "knative.dev/pkg/controller"
+	injection "knative.dev/pkg/injection"
+	logging "knative.dev/pkg/logging"
+)
+
+func init() {
+	injection.Default.RegisterInformer(withInformer)
+	injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
+}
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct{}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+	f := factory.Get(ctx)
+	inf := f.Core().V1().Nodes()
+	return context.WithValue(ctx, Key{}, inf), inf.Informer()
+}
+
+func withDynamicInformer(ctx context.Context) context.Context {
+	inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)}
+	return context.WithValue(ctx, Key{}, inf)
+}
+
+// Get extracts the typed informer from the context.
+func Get(ctx context.Context) v1.NodeInformer {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		logging.FromContext(ctx).Panic(
+			"Unable to fetch k8s.io/client-go/informers/core/v1.NodeInformer from context.")
+	}
+	return untyped.(v1.NodeInformer)
+}
+
+type wrapper struct {
+	client kubernetes.Interface
+
+	resourceVersion string
+}
+
+var _ v1.NodeInformer = (*wrapper)(nil)
+var _ corev1.NodeLister = (*wrapper)(nil)
+
+func (w *wrapper) Informer() cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(nil, &apicorev1.Node{}, 0, nil)
+}
+
+func (w *wrapper) Lister() corev1.NodeLister {
+	return w
+}
+
+// SetResourceVersion allows consumers to adjust the minimum resourceVersion
+// used by the underlying client. It is not accessible via the standard
+// lister interface, but can be accessed through a user-defined interface and
+// an implementation check, e.g.:
+//	rvs, ok := foo.(ResourceVersionSetter)
+func (w *wrapper) SetResourceVersion(resourceVersion string) {
+	w.resourceVersion = resourceVersion
+}
+
+func (w *wrapper) List(selector labels.Selector) (ret []*apicorev1.Node, err error) {
+	lo, err := w.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
+		LabelSelector:   selector.String(),
+		ResourceVersion: w.resourceVersion,
+	})
+	if err != nil {
+		return nil, err
+	}
+	for idx := range lo.Items {
+		ret = append(ret, &lo.Items[idx])
+	}
+	return ret, nil
+}
+
+func (w *wrapper) Get(name string) (*apicorev1.Node, error) {
+	return w.client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{
+		ResourceVersion: w.resourceVersion,
+	})
+}
diff --git a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/controller.go b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/controller.go
new file mode 100644
index 00000000000..893b3682e67
--- /dev/null
+++ b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/controller.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2022 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package node
+
+import (
+	context "context"
+	fmt "fmt"
+	reflect "reflect"
+	strings "strings"
+
+	zap "go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	v1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	record "k8s.io/client-go/tools/record"
+	client "knative.dev/pkg/client/injection/kube/client"
+	node "knative.dev/pkg/client/injection/kube/informers/core/v1/node"
+	controller "knative.dev/pkg/controller"
+	logging "knative.dev/pkg/logging"
+	logkey "knative.dev/pkg/logging/logkey"
+	reconciler "knative.dev/pkg/reconciler"
+)
+
+const (
+	defaultControllerAgentName = "node-controller"
+	defaultFinalizerName       = "nodes.core"
+)
+
+// NewImpl returns a controller.Impl that handles queuing and feeding work from
+// the queue through an implementation of controller.Reconciler, delegating to
+// the provided Interface and optional Finalizer methods. OptionsFn is used to return
+// controller.ControllerOptions to be used by the internal reconciler.
+func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
+	logger := logging.FromContext(ctx)
+
+	// Check the options function input. It should be 0 or 1.
+	if len(optionsFns) > 1 {
+		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
+	}
+
+	nodeInformer := node.Get(ctx)
+
+	lister := nodeInformer.Lister()
+
+	var promoteFilterFunc func(obj interface{}) bool
+
+	rec := &reconcilerImpl{
+		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
+			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
+				all, err := lister.List(labels.Everything())
+				if err != nil {
+					return err
+				}
+				for _, elt := range all {
+					if promoteFilterFunc != nil {
+						if ok := promoteFilterFunc(elt); !ok {
+							continue
+						}
+					}
+					enq(bkt, types.NamespacedName{
+						Namespace: elt.GetNamespace(),
+						Name:      elt.GetName(),
+					})
+				}
+				return nil
+			},
+		},
+		Client:        client.Get(ctx),
+		Lister:        lister,
+		reconciler:    r,
+		finalizerName: defaultFinalizerName,
+	}
+
+	ctrType := reflect.TypeOf(r).Elem()
+	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
+	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")
+
+	logger = logger.With(
+		zap.String(logkey.ControllerType, ctrTypeName),
+		zap.String(logkey.Kind, "core.Node"),
+	)
+
+	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
+	agentName := defaultControllerAgentName
+
+	// Pass impl to the options. Save any optional results.
+	for _, fn := range optionsFns {
+		opts := fn(impl)
+		if opts.ConfigStore != nil {
+			rec.configStore = opts.ConfigStore
+		}
+		if opts.FinalizerName != "" {
+			rec.finalizerName = opts.FinalizerName
+		}
+		if opts.AgentName != "" {
+			agentName = opts.AgentName
+		}
+		if opts.SkipStatusUpdates {
+			rec.skipStatusUpdates = true
+		}
+		if opts.DemoteFunc != nil {
+			rec.DemoteFunc = opts.DemoteFunc
+		}
+		if opts.PromoteFilterFunc != nil {
+			promoteFilterFunc = opts.PromoteFilterFunc
+		}
+	}
+
+	rec.Recorder = createRecorder(ctx, agentName)
+
+	return impl
+}
+
+func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
+	logger := logging.FromContext(ctx)
+
+	recorder := controller.GetEventRecorder(ctx)
+	if recorder == nil {
+		// Create event broadcaster
+		logger.Debug("Creating event broadcaster")
+		eventBroadcaster := record.NewBroadcaster()
+		watches := []watch.Interface{
+			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
+			eventBroadcaster.StartRecordingToSink(
+				&v1.EventSinkImpl{Interface: client.Get(ctx).CoreV1().Events("")}),
+		}
+		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
+		go func() {
+			<-ctx.Done()
+			for _, w := range watches {
+				w.Stop()
+			}
+		}()
+	}
+
+	return recorder
+}
+
+func init() {
+	scheme.AddToScheme(scheme.Scheme)
+}
diff --git a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/reconciler.go b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/reconciler.go
new file mode 100644
index 00000000000..37896933b26
--- /dev/null
+++ b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/reconciler.go
@@ -0,0 +1,431 @@
+/*
+Copyright 2022 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package node
+
+import (
+	context "context"
+	json "encoding/json"
+	fmt "fmt"
+
+	zap "go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	v1 "k8s.io/api/core/v1"
+	equality "k8s.io/apimachinery/pkg/api/equality"
+	errors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	types "k8s.io/apimachinery/pkg/types"
+	sets "k8s.io/apimachinery/pkg/util/sets"
+	kubernetes "k8s.io/client-go/kubernetes"
+	corev1 "k8s.io/client-go/listers/core/v1"
+	record "k8s.io/client-go/tools/record"
+	controller "knative.dev/pkg/controller"
+	kmp "knative.dev/pkg/kmp"
+	logging "knative.dev/pkg/logging"
+	reconciler "knative.dev/pkg/reconciler"
+)
+
+// Interface defines the strongly typed interfaces to be implemented by a
+// controller reconciling v1.Node.
+type Interface interface {
+	// ReconcileKind implements custom logic to reconcile v1.Node. Any changes
+	// to the objects .Status or .Finalizers will be propagated to the stored
+	// object. It is recommended that implementors do not call any update calls
+	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
+	// controller to propagate those properties. The resource passed to ReconcileKind
+	// will always have an empty deletion timestamp.
+	ReconcileKind(ctx context.Context, o *v1.Node) reconciler.Event
+}
+
+// Finalizer defines the strongly typed interfaces to be implemented by a
+// controller finalizing v1.Node.
+type Finalizer interface {
+	// FinalizeKind implements custom logic to finalize v1.Node. Any changes
+	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
+	// Normal type reconciler.Event will allow the finalizer to be deleted on
+	// the resource. The resource passed to FinalizeKind will always have a set
+	// deletion timestamp.
+	FinalizeKind(ctx context.Context, o *v1.Node) reconciler.Event
+}
+
+// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
+// controller reconciling v1.Node if they want to process resources for which
+// they are not the leader.
+type ReadOnlyInterface interface {
+	// ObserveKind implements logic to observe v1.Node.
+	// This method should not write to the API.
+	ObserveKind(ctx context.Context, o *v1.Node) reconciler.Event
+}
+
+type doReconcile func(ctx context.Context, o *v1.Node) reconciler.Event
+
+// reconcilerImpl implements controller.Reconciler for v1.Node resources.
+type reconcilerImpl struct {
+	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
+	reconciler.LeaderAwareFuncs
+
+	// Client is used to write back status updates.
+	Client kubernetes.Interface
+
+	// Listers index properties about resources.
+	Lister corev1.NodeLister
+
+	// Recorder is an event recorder for recording Event resources to the
+	// Kubernetes API.
+	Recorder record.EventRecorder
+
+	// configStore allows for decorating a context with config maps.
+	// +optional
+	configStore reconciler.ConfigStore
+
+	// reconciler is the implementation of the business logic of the resource.
+	reconciler Interface
+
+	// finalizerName is the name of the finalizer to reconcile.
+	finalizerName string
+
+	// skipStatusUpdates configures whether or not this reconciler automatically updates
+	// the status of the reconciled resource.
+	skipStatusUpdates bool
+}
+
+// Check that our Reconciler implements controller.Reconciler.
+var _ controller.Reconciler = (*reconcilerImpl)(nil)
+
+// Check that our generated Reconciler is always LeaderAware.
+var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
+
+func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client kubernetes.Interface, lister corev1.NodeLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
+	// Check the options function input. It should be 0 or 1.
+	if len(options) > 1 {
+		logger.Fatal("Up to one options struct is supported, found: ", len(options))
+	}
+
+	// Fail fast when users inadvertently implement the other LeaderAware interface.
+	// For the typed reconcilers, Promote shouldn't take any arguments.
+	if _, ok := r.(reconciler.LeaderAware); ok {
+		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
+	}
+
+	rec := &reconcilerImpl{
+		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
+			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
+				all, err := lister.List(labels.Everything())
+				if err != nil {
+					return err
+				}
+				for _, elt := range all {
+					// TODO: Consider letting users specify a filter in options.
+					enq(bkt, types.NamespacedName{
+						Namespace: elt.GetNamespace(),
+						Name:      elt.GetName(),
+					})
+				}
+				return nil
+			},
+		},
+		Client:        client,
+		Lister:        lister,
+		Recorder:      recorder,
+		reconciler:    r,
+		finalizerName: defaultFinalizerName,
+	}
+
+	for _, opts := range options {
+		if opts.ConfigStore != nil {
+			rec.configStore = opts.ConfigStore
+		}
+		if opts.FinalizerName != "" {
+			rec.finalizerName = opts.FinalizerName
+		}
+		if opts.SkipStatusUpdates {
+			rec.skipStatusUpdates = true
+		}
+		if opts.DemoteFunc != nil {
+			rec.DemoteFunc = opts.DemoteFunc
+		}
+	}
+
+	return rec
+}
+
+// Reconcile implements controller.Reconciler
+func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
+	logger := logging.FromContext(ctx)
+
+	// Initialize the reconciler state. This will convert the namespace/name
+	// string into a distinct namespace and name, determine if this instance of
+	// the reconciler is the leader, and any additional interfaces implemented
+	// by the reconciler. Returns an error is the resource key is invalid.
+	s, err := newState(key, r)
+	if err != nil {
+		logger.Error("Invalid resource key: ", key)
+		return nil
+	}
+
+	// If we are not the leader, and we don't implement either ReadOnly
+	// observer interfaces, then take a fast-path out.
+	if s.isNotLeaderNorObserver() {
+		return controller.NewSkipKey(key)
+	}
+
+	// If configStore is set, attach the frozen configuration to the context.
+	if r.configStore != nil {
+		ctx = r.configStore.ToContext(ctx)
+	}
+
+	// Add the recorder to context.
+	ctx = controller.WithEventRecorder(ctx, r.Recorder)
+
+	// Get the resource with this namespace/name.
+
+	getter := r.Lister
+
+	original, err := getter.Get(s.name)
+
+	if errors.IsNotFound(err) {
+		// The resource may no longer exist, in which case we stop processing and call
+		// the ObserveDeletion handler if appropriate.
+		logger.Debugf("Resource %q no longer exists", key)
+		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
+			return del.ObserveDeletion(ctx, types.NamespacedName{
+				Namespace: s.namespace,
+				Name:      s.name,
+			})
+		}
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	// Don't modify the informers copy.
+	resource := original.DeepCopy()
+
+	var reconcileEvent reconciler.Event
+
+	name, do := s.reconcileMethodFor(resource)
+	// Append the target method to the logger.
+	logger = logger.With(zap.String("targetMethod", name))
+	switch name {
+	case reconciler.DoReconcileKind:
+		// Set and update the finalizer on resource if r.reconciler
+		// implements Finalizer.
+		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
+			return fmt.Errorf("failed to set finalizers: %w", err)
+		}
+
+		// Reconcile this copy of the resource and then write back any status
+		// updates regardless of whether the reconciliation errored out.
+		reconcileEvent = do(ctx, resource)
+
+	case reconciler.DoFinalizeKind:
+		// For finalizing reconcilers, if this resource being marked for deletion
+		// and reconciled cleanly (nil or normal event), remove the finalizer.
+		reconcileEvent = do(ctx, resource)
+
+		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
+			return fmt.Errorf("failed to clear finalizers: %w", err)
+		}
+
+	case reconciler.DoObserveKind:
+		// Observe any changes to this resource, since we are not the leader.
+		reconcileEvent = do(ctx, resource)
+
+	}
+
+	// Synchronize the status.
+	switch {
+	case r.skipStatusUpdates:
+		// This reconciler implementation is configured to skip resource updates.
+		// This may mean this reconciler does not observe spec, but reconciles external changes.
+	case equality.Semantic.DeepEqual(original.Status, resource.Status):
+		// If we didn't change anything then don't call updateStatus.
+		// This is important because the copy we loaded from the injectionInformer's
+		// cache may be stale and we don't want to overwrite a prior update
+		// to status with this stale state.
+	case !s.isLeader:
+		// High-availability reconcilers may have many replicas watching the resource, but only
+		// the elected leader is expected to write modifications.
+		logger.Warn("Saw status changes when we aren't the leader!")
+	default:
+		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
+			logger.Warnw("Failed to update resource status", zap.Error(err))
+			r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
+				"Failed to update status for %q: %v", resource.Name, err)
+			return err
+		}
+	}
+
+	// Report the reconciler event, if any.
+	if reconcileEvent != nil {
+		var event *reconciler.ReconcilerEvent
+		if reconciler.EventAs(reconcileEvent, &event) {
+			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
+			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())
+
+			// the event was wrapped inside an error, consider the reconciliation as failed
+			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
+				return reconcileEvent
+			}
+			return nil
+		}
+
+		if controller.IsSkipKey(reconcileEvent) {
+			// This is a wrapped error, don't emit an event.
+		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
+			// This is a wrapped error, don't emit an event.
+		} else {
+			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
+			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
+		}
+		return reconcileEvent
+	}
+
+	return nil
+}
+
+func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1.Node, desired *v1.Node) error {
+	existing = existing.DeepCopy()
+	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
+		// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
+		if attempts > 0 {
+
+			getter := r.Client.CoreV1().Nodes()
+
+			existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+		}
+
+		// If there's nothing to update, just return.
+		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
+			return nil
+		}
+
+		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
+			if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
+				logger.Debug("Updating status with: ", diff)
+			}
+		}
+
+		existing.Status = desired.Status
+
+		updater := r.Client.CoreV1().Nodes()
+
+		_, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
+		return err
+	})
+}
+
+// updateFinalizersFiltered will update the Finalizers of the resource.
+// TODO: this method could be generic and sync all finalizers. For now it only
+// updates defaultFinalizerName or its override.
+func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.Node, desiredFinalizers sets.String) (*v1.Node, error) {
+	// Don't modify the informers copy.
+	existing := resource.DeepCopy()
+
+	var finalizers []string
+
+	// If there's nothing to update, just return.
+	existingFinalizers := sets.NewString(existing.Finalizers...)
+
+	if desiredFinalizers.Has(r.finalizerName) {
+		if existingFinalizers.Has(r.finalizerName) {
+			// Nothing to do.
+			return resource, nil
+		}
+		// Add the finalizer.
+		finalizers = append(existing.Finalizers, r.finalizerName)
+	} else {
+		if !existingFinalizers.Has(r.finalizerName) {
+			// Nothing to do.
+			return resource, nil
+		}
+		// Remove the finalizer.
+		existingFinalizers.Delete(r.finalizerName)
+		finalizers = existingFinalizers.List()
+	}
+
+	mergePatch := map[string]interface{}{
+		"metadata": map[string]interface{}{
+			"finalizers":      finalizers,
+			"resourceVersion": existing.ResourceVersion,
+		},
+	}
+
+	patch, err := json.Marshal(mergePatch)
+	if err != nil {
+		return resource, err
+	}
+
+	patcher := r.Client.CoreV1().Nodes()
+
+	resourceName := resource.Name
+	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
+	if err != nil {
+		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
+			"Failed to update finalizers for %q: %v", resourceName, err)
+	} else {
+		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
+			"Updated %q finalizers", resource.GetName())
+	}
+	return updated, err
+}
+
+func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.Node) (*v1.Node, error) {
+	if _, ok := r.reconciler.(Finalizer); !ok {
+		return resource, nil
+	}
+
+	finalizers := sets.NewString(resource.Finalizers...)
+
+	// If this resource is not being deleted, mark the finalizer.
+	if resource.GetDeletionTimestamp().IsZero() {
+		finalizers.Insert(r.finalizerName)
+	}
+
+	// Synchronize the finalizers filtered by r.finalizerName.
+	return r.updateFinalizersFiltered(ctx, resource, finalizers)
+}
+
+func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.Node, reconcileEvent reconciler.Event) (*v1.Node, error) {
+	if _, ok := r.reconciler.(Finalizer); !ok {
+		return resource, nil
+	}
+	if resource.GetDeletionTimestamp().IsZero() {
+		return resource, nil
+	}
+
+	finalizers := sets.NewString(resource.Finalizers...)
+
+	if reconcileEvent != nil {
+		var event *reconciler.ReconcilerEvent
+		if reconciler.EventAs(reconcileEvent, &event) {
+			if event.EventType == v1.EventTypeNormal {
+				finalizers.Delete(r.finalizerName)
+			}
+		}
+	} else {
+		finalizers.Delete(r.finalizerName)
+	}
+
+	// Synchronize the finalizers filtered by r.finalizerName.
+	return r.updateFinalizersFiltered(ctx, resource, finalizers)
+}
diff --git a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/state.go b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/state.go
new file mode 100644
index 00000000000..2d89c96b7f3
--- /dev/null
+++ b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/node/state.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2022 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package node
+
+import (
+	fmt "fmt"
+
+	v1 "k8s.io/api/core/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	cache "k8s.io/client-go/tools/cache"
+	reconciler "knative.dev/pkg/reconciler"
+)
+
+// state is used to track the state of a reconciler in a single run.
+type state struct {
+	// key is the original reconciliation key from the queue.
+	key string
+	// namespace is the namespace split from the reconciliation key.
+	namespace string
+	// name is the name split from the reconciliation key.
+	name string
+	// reconciler is the reconciler.
+	reconciler Interface
+	// roi is the read only interface cast of the reconciler.
+	roi ReadOnlyInterface
+	// isROI (Read Only Interface) the reconciler only observes reconciliation.
+	isROI bool
+	// isLeader the instance of the reconciler is the elected leader.
+	isLeader bool
+}
+
+func newState(key string, r *reconcilerImpl) (*state, error) {
+	// Convert the namespace/name string into a distinct namespace and name.
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		return nil, fmt.Errorf("invalid resource key: %s", key)
+	}
+
+	roi, isROI := r.reconciler.(ReadOnlyInterface)
+
+	isLeader := r.IsLeaderFor(types.NamespacedName{
+		Namespace: namespace,
+		Name:      name,
+	})
+
+	return &state{
+		key:        key,
+		namespace:  namespace,
+		name:       name,
+		reconciler: r.reconciler,
+		roi:        roi,
+		isROI:      isROI,
+		isLeader:   isLeader,
+	}, nil
+}
+
+// isNotLeaderNorObserver checks to see if this reconciler with the current
+// state is enabled to do any work or not.
+// isNotLeaderNorObserver returns true when there is no work possible for the
+// reconciler.
+func (s *state) isNotLeaderNorObserver() bool {
+	if !s.isLeader && !s.isROI {
+		// If we are not the leader, and we don't implement the ReadOnly
+		// interface, then take a fast-path out.
+		return true
+	}
+	return false
+}
+
+func (s *state) reconcileMethodFor(o *v1.Node) (string, doReconcile) {
+	if o.GetDeletionTimestamp().IsZero() {
+		if s.isLeader {
+			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
+		} else if s.isROI {
+			return reconciler.DoObserveKind, s.roi.ObserveKind
+		}
+	} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
+		return reconciler.DoFinalizeKind, fin.FinalizeKind
+	}
+	return "unknown", nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4987101bfba..99be3405a8d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1603,6 +1603,7 @@ knative.dev/pkg/client/injection/kube/informers/core/v1/configmap
 knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake
 knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange
 knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake
+knative.dev/pkg/client/injection/kube/informers/core/v1/node
 knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered
 knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered/fake
 knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount
@@ -1611,6 +1612,7 @@ knative.dev/pkg/client/injection/kube/informers/factory
 knative.dev/pkg/client/injection/kube/informers/factory/fake
 knative.dev/pkg/client/injection/kube/informers/factory/filtered
 knative.dev/pkg/client/injection/kube/informers/factory/filtered/fake
+knative.dev/pkg/client/injection/kube/reconciler/core/v1/node
 knative.dev/pkg/codegen/cmd/injection-gen
 knative.dev/pkg/codegen/cmd/injection-gen/args
 knative.dev/pkg/codegen/cmd/injection-gen/generators
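
Below is a minimal unit test sketch for ReconcileKind (not part of this patch) using the fake clientset from k8s.io/client-go. The node and pod names are illustrative. Note that the fake clientset filters List results by label selector but ignores field selectors, so the spec.nodeName filter is a no-op here and the label match does the work:

package node

import (
	"context"
	"testing"

	"github.com/tektoncd/pipeline/pkg/workspace"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestReconcileKindDeletesOnlyAffinityAssistantPods(t *testing.T) {
	ctx := context.Background()
	// A node that has been cordoned for maintenance (illustrative name).
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "cordoned-node"},
		Spec:       corev1.NodeSpec{Unschedulable: true},
	}
	// An affinity assistant placeholder pod scheduled to the cordoned node.
	assistant := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "affinity-assistant-0",
			Namespace: "default",
			Labels:    map[string]string{workspace.LabelComponent: workspace.ComponentNameAffinityAssistant},
		},
		Spec: corev1.PodSpec{NodeName: node.Name},
	}
	// A TaskRun pod on the same node, which must be left alone.
	taskRunPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "taskrun-pod", Namespace: "default"},
		Spec:       corev1.PodSpec{NodeName: node.Name},
	}

	client := fake.NewSimpleClientset(node, assistant, taskRunPod)
	r := &Reconciler{KubeClientSet: client}

	if err := r.ReconcileKind(ctx, node); err != nil {
		t.Fatalf("ReconcileKind: %v", err)
	}

	// The placeholder pod should be gone; its statefulset (not simulated here)
	// would recreate it on a schedulable node.
	if _, err := client.CoreV1().Pods("default").Get(ctx, assistant.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) {
		t.Errorf("expected affinity assistant pod to be deleted, got: %v", err)
	}
	// The TaskRun pod should still exist.
	if _, err := client.CoreV1().Pods("default").Get(ctx, taskRunPod.Name, metav1.GetOptions{}); err != nil {
		t.Errorf("expected TaskRun pod to remain, got: %v", err)
	}
}

Because the reconciler only deletes pods and never writes Node status, the controller is registered with SkipStatusUpdates set, which keeps the generated reconciler from attempting status writes on Nodes.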