From e9d54e78608c46e605165c8237f7632d37508e80 Mon Sep 17 00:00:00 2001
From: Luke
Date: Mon, 27 May 2024 16:46:54 +0900
Subject: [PATCH 1/6] chore: upgrade karpenter to 0.36.2

---
 .gitignore                                    |   1 +
 pkg/apis/crds/karpenter.sh_nodepools.yaml     |   9 +-
 pkg/apis/v1beta1/nodeclaim_status.go          |  13 +-
 pkg/apis/v1beta1/nodepool.go                  |   7 +-
 pkg/apis/v1beta1/nodepool_validation.go       |   3 -
 .../v1beta1/nodepool_validation_cel_test.go   |   5 -
 pkg/apis/v1beta1/zz_generated.deepcopy.go     |   5 +
 pkg/controllers/disruption/consolidation.go   |  11 +
 .../nodeclaim/disruption/consolidation.go     | 123 ++++++++++++++++
 .../nodeclaim/disruption/controller.go        |   9 +-
 10 files changed, 166 insertions(+), 20 deletions(-)
 create mode 100644 pkg/controllers/nodeclaim/disruption/consolidation.go

diff --git a/.gitignore b/.gitignore
index 95f28cff37..5dac84ced1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@ coverage.html
 *.test
 *.cpuprofile
 *.heapprofile
+*.swp
 
 # Common in OSs and IDEs
 .idea
diff --git a/pkg/apis/crds/karpenter.sh_nodepools.yaml b/pkg/apis/crds/karpenter.sh_nodepools.yaml
index d7ebcc391d..abba8b1a82 100644
--- a/pkg/apis/crds/karpenter.sh_nodepools.yaml
+++ b/pkg/apis/crds/karpenter.sh_nodepools.yaml
@@ -135,10 +135,15 @@ spec:
                     memory leak protection, and disruption testing.
                   pattern: ^(([0-9]+(s|m|h))+)|(Never)$
                   type: string
+                utilizationThreshold:
+                  description: |-
+                    UtilizationThreshold is the utilization percentage (sum of pod resource requests
+                    divided by the node's allocatable capacity) below which a node can be considered for disruption.
+                  maximum: 100
+                  minimum: 1
+                  type: integer
               type: object
               x-kubernetes-validations:
-              - message: consolidateAfter cannot be combined with consolidationPolicy=WhenUnderutilized
-                rule: 'has(self.consolidateAfter) ? self.consolidationPolicy != ''WhenUnderutilized'' || self.consolidateAfter == ''Never'' : true'
               - message: consolidateAfter must be specified with consolidationPolicy=WhenEmpty
                 rule: 'self.consolidationPolicy == ''WhenEmpty'' ? has(self.consolidateAfter) : true'
             limits:
diff --git a/pkg/apis/v1beta1/nodeclaim_status.go b/pkg/apis/v1beta1/nodeclaim_status.go
index c5ea8d913d..728c98f26a 100644
--- a/pkg/apis/v1beta1/nodeclaim_status.go
+++ b/pkg/apis/v1beta1/nodeclaim_status.go
@@ -22,12 +22,13 @@ import (
 )
 
 const (
-	ConditionTypeLaunched    = "Launched"
-	ConditionTypeRegistered  = "Registered"
-	ConditionTypeInitialized = "Initialized"
-	ConditionTypeEmpty       = "Empty"
-	ConditionTypeDrifted     = "Drifted"
-	ConditionTypeExpired     = "Expired"
+	ConditionTypeLaunched      = "Launched"
+	ConditionTypeRegistered    = "Registered"
+	ConditionTypeInitialized   = "Initialized"
+	ConditionTypeEmpty         = "Empty"
+	ConditionTypeUnderutilized = "Underutilized"
+	ConditionTypeDrifted       = "Drifted"
+	ConditionTypeExpired       = "Expired"
 )
 
 // NodeClaimStatus defines the observed state of NodeClaim
diff --git a/pkg/apis/v1beta1/nodepool.go b/pkg/apis/v1beta1/nodepool.go
index eaadb6d15a..c2eb1fa993 100644
--- a/pkg/apis/v1beta1/nodepool.go
+++ b/pkg/apis/v1beta1/nodepool.go
@@ -44,7 +44,6 @@ type NodePoolSpec struct {
 	Template NodeClaimTemplate `json:"template"`
 	// Disruption contains the parameters that relate to Karpenter's disruption logic
 	// +kubebuilder:default={"consolidationPolicy": "WhenUnderutilized", "expireAfter": "720h"}
-	// +kubebuilder:validation:XValidation:message="consolidateAfter cannot be combined with consolidationPolicy=WhenUnderutilized",rule="has(self.consolidateAfter) ? self.consolidationPolicy != 'WhenUnderutilized' || self.consolidateAfter == 'Never' : true"
 	// +kubebuilder:validation:XValidation:message="consolidateAfter must be specified with consolidationPolicy=WhenEmpty",rule="self.consolidationPolicy == 'WhenEmpty' ? has(self.consolidateAfter) : true"
 	// +optional
 	Disruption Disruption `json:"disruption"`
@@ -76,6 +75,12 @@ type Disruption struct {
 	// +kubebuilder:validation:Enum:={WhenEmpty,WhenUnderutilized}
 	// +optional
 	ConsolidationPolicy ConsolidationPolicy `json:"consolidationPolicy,omitempty"`
+	// UtilizationThreshold is the utilization percentage (sum of pod resource requests
+	// divided by the node's allocatable capacity) below which a node can be considered for disruption.
+	// +kubebuilder:validation:Minimum:=1
+	// +kubebuilder:validation:Maximum:=100
+	// +optional
+	UtilizationThreshold *int `json:"utilizationThreshold,omitempty"`
 	// ExpireAfter is the duration the controller will wait
 	// before terminating a node, measured from when the node is created. This
 	// is useful to implement features like eventually consistent node upgrade,
diff --git a/pkg/apis/v1beta1/nodepool_validation.go b/pkg/apis/v1beta1/nodepool_validation.go
index 66161bcc16..d12f847d82 100644
--- a/pkg/apis/v1beta1/nodepool_validation.go
+++ b/pkg/apis/v1beta1/nodepool_validation.go
@@ -98,9 +98,6 @@ func (in *NodeClaimTemplate) validateRequirementsNodePoolKeyDoesNotExist() (errs
 
 //nolint:gocyclo
 func (in *Disruption) validate() (errs *apis.FieldError) {
-	if in.ConsolidateAfter != nil && in.ConsolidateAfter.Duration != nil && in.ConsolidationPolicy == ConsolidationPolicyWhenUnderutilized {
-		return errs.Also(apis.ErrGeneric("consolidateAfter cannot be combined with consolidationPolicy=WhenUnderutilized"))
-	}
 	if in.ConsolidateAfter == nil && in.ConsolidationPolicy == ConsolidationPolicyWhenEmpty {
 		return errs.Also(apis.ErrGeneric("consolidateAfter must be specified with consolidationPolicy=WhenEmpty"))
 	}
diff --git a/pkg/apis/v1beta1/nodepool_validation_cel_test.go b/pkg/apis/v1beta1/nodepool_validation_cel_test.go
index 3a242e785b..204e6b3a4b 100644
--- a/pkg/apis/v1beta1/nodepool_validation_cel_test.go
+++ b/pkg/apis/v1beta1/nodepool_validation_cel_test.go
@@ -94,11 +94,6 @@ var _ = Describe("CEL/Validation", func() {
 			nodePool.Spec.Disruption.ConsolidationPolicy = ConsolidationPolicyWhenEmpty
 			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
 		})
-		It("should fail when setting consolidateAfter with consolidationPolicy=WhenUnderutilized", func() {
-			nodePool.Spec.Disruption.ConsolidateAfter = &NillableDuration{Duration: lo.ToPtr(lo.Must(time.ParseDuration("30s")))}
-			nodePool.Spec.Disruption.ConsolidationPolicy = ConsolidationPolicyWhenUnderutilized
-			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
-		})
 		It("should succeed when not setting consolidateAfter to 'Never' with consolidationPolicy=WhenUnderutilized", func() {
 			nodePool.Spec.Disruption.ConsolidateAfter = &NillableDuration{Duration: nil}
 			nodePool.Spec.Disruption.ConsolidationPolicy = ConsolidationPolicyWhenUnderutilized
diff --git a/pkg/apis/v1beta1/zz_generated.deepcopy.go b/pkg/apis/v1beta1/zz_generated.deepcopy.go
index 057186955b..6f7fec3040 100644
--- a/pkg/apis/v1beta1/zz_generated.deepcopy.go
+++ b/pkg/apis/v1beta1/zz_generated.deepcopy.go
@@ -61,6 +61,11 @@ func (in *Disruption) DeepCopyInto(out *Disruption) {
 		*out = new(NillableDuration)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.UtilizationThreshold != nil {
+		in, out := &in.UtilizationThreshold, &out.UtilizationThreshold
+		*out = new(int)
+		**out = **in
+	}
 	in.ExpireAfter.DeepCopyInto(&out.ExpireAfter)
 	if in.Budgets != nil {
 		in, out := &in.Budgets, &out.Budgets
diff --git a/pkg/controllers/disruption/consolidation.go b/pkg/controllers/disruption/consolidation.go
index 3d073d941b..bab6178561 100644
--- a/pkg/controllers/disruption/consolidation.go
+++ b/pkg/controllers/disruption/consolidation.go
@@ -101,6 +101,17 @@ func (c *consolidation) ShouldDisrupt(_ context.Context, cn *Candidate) bool {
 		c.recorder.Publish(disruptionevents.Unconsolidatable(cn.Node, cn.NodeClaim, fmt.Sprintf("NodePool %q has consolidation disabled", cn.nodePool.Name))...)
 		return false
 	}
+	// Only gate on the Underutilized condition when utilizationThreshold is specified, to stay backward compatible
+	if cn.nodePool.Spec.Disruption.UtilizationThreshold != nil {
+		underutilized := cn.NodeClaim.StatusConditions().Get(v1beta1.ConditionTypeUnderutilized)
+		if underutilized == nil || !underutilized.IsTrue() {
+			return false
+		}
+		// When set, consolidateAfter acts as a grace period measured from the condition's transition time
+		if ca := cn.nodePool.Spec.Disruption.ConsolidateAfter; ca != nil && ca.Duration != nil && c.clock.Now().Before(underutilized.LastTransitionTime.Add(*ca.Duration)) {
+			return false
+		}
+	}
 	return true
 }
diff --git a/pkg/controllers/nodeclaim/disruption/consolidation.go b/pkg/controllers/nodeclaim/disruption/consolidation.go
new file mode 100644
index 0000000000..f259844aa1
--- /dev/null
+++ b/pkg/controllers/nodeclaim/disruption/consolidation.go
@@ -0,0 +1,123 @@
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package disruption
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/utils/clock"
+	"knative.dev/pkg/logging"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
+	"sigs.k8s.io/karpenter/pkg/controllers/state"
+	"sigs.k8s.io/karpenter/pkg/metrics"
+	"sigs.k8s.io/karpenter/pkg/utils/node"
+	nodeclaimutil "sigs.k8s.io/karpenter/pkg/utils/nodeclaim"
+)
+
+// Consolidation is a nodeclaim sub-controller that adds or removes the Underutilized status condition on nodeclaims when using the WhenUnderutilized policy.
+type Consolidation struct {
+	kubeClient client.Client
+	cluster    *state.Cluster
+	clock      clock.Clock
+}
+
+//nolint:gocyclo
+func (e *Consolidation) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeClaim *v1beta1.NodeClaim) (reconcile.Result, error) {
+	hasCondition := nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeUnderutilized) != nil
+	if nodePool.Spec.Disruption.ConsolidationPolicy != v1beta1.ConsolidationPolicyWhenUnderutilized {
+		if hasCondition {
+			_ = nodeClaim.StatusConditions().Clear(v1beta1.ConditionTypeUnderutilized)
+		}
+		return reconcile.Result{}, nil
+	}
+	if initCond := nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeInitialized); initCond == nil || initCond.IsFalse() {
+		if hasCondition {
+			_ = nodeClaim.StatusConditions().Clear(v1beta1.ConditionTypeUnderutilized)
+			logging.FromContext(ctx).Debugf("removing underutilized status condition, node isn't initialized")
+		}
+		return reconcile.Result{}, nil
+	}
+
+	// Get the node to check utilization
+	n, err := nodeclaimutil.NodeForNodeClaim(ctx, e.kubeClient, nodeClaim)
+	if err != nil {
+		if nodeclaimutil.IsDuplicateNodeError(err) || nodeclaimutil.IsNodeNotFoundError(err) {
+			_ = nodeClaim.StatusConditions().Clear(v1beta1.ConditionTypeUnderutilized)
+			if hasCondition {
+				logging.FromContext(ctx).Debugf("removing underutilized status condition, doesn't have a single node mapping")
+			}
+			return reconcile.Result{}, nil
+		}
+		return reconcile.Result{}, err
+	}
+	pods, err := node.GetPods(ctx, e.kubeClient, n)
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("retrieving node pods, %w", err)
+	}
+	// If utilizationThreshold is specified, the node can be disrupted only when its utilization is below the threshold.
+	threshold := nodePool.Spec.Disruption.UtilizationThreshold
+	if threshold != nil {
+		cpu, err := calculateUtilizationOfResource(n, v1.ResourceCPU, pods)
+		if err != nil {
+			return reconcile.Result{}, fmt.Errorf("failed to calculate CPU utilization, %w", err)
+		}
+		memory, err := calculateUtilizationOfResource(n, v1.ResourceMemory, pods)
+		if err != nil {
+			return reconcile.Result{}, fmt.Errorf("failed to calculate memory utilization, %w", err)
+		}
+		if cpu < float64(*threshold)/100 && memory < float64(*threshold)/100 {
+			if !hasCondition {
+				nodeClaim.StatusConditions().SetTrue(v1beta1.ConditionTypeUnderutilized)
+				logging.FromContext(ctx).Debugf("marking underutilized")
+				metrics.NodeClaimsDisruptedCounter.With(prometheus.Labels{
+					metrics.TypeLabel:     metrics.ConsolidationReason,
+					metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey],
+				}).Inc()
+			}
+		} else {
+			if hasCondition {
+				_ = nodeClaim.StatusConditions().Clear(v1beta1.ConditionTypeUnderutilized)
+				logging.FromContext(ctx).Debugf("removing underutilized status condition, utilization increased")
+			}
+		}
+	}
+	return reconcile.Result{}, nil
+}
+
+// calculateUtilizationOfResource calculates the utilization of a given resource for a node.
+func calculateUtilizationOfResource(node *v1.Node, resourceName v1.ResourceName, pods []*v1.Pod) (float64, error) {
+	allocatable, found := node.Status.Allocatable[resourceName]
+	if !found {
+		return 0, fmt.Errorf("failed to get allocatable %v from node %s", resourceName, node.Name)
+	}
+	if allocatable.MilliValue() == 0 {
+		return 0, fmt.Errorf("allocatable %v is zero on node %s", resourceName, node.Name)
+	}
+	podsRequest := resource.MustParse("0")
+	for _, pod := range pods {
+		for _, container := range pod.Spec.Containers {
+			if resourceValue, found := container.Resources.Requests[resourceName]; found {
+				podsRequest.Add(resourceValue)
+			}
+		}
+	}
+	return float64(podsRequest.MilliValue()) / float64(allocatable.MilliValue()), nil
+}
diff --git a/pkg/controllers/nodeclaim/disruption/controller.go b/pkg/controllers/nodeclaim/disruption/controller.go
index aba7592d78..33211c348e 100644
--- a/pkg/controllers/nodeclaim/disruption/controller.go
+++ b/pkg/controllers/nodeclaim/disruption/controller.go
@@ -51,9 +51,10 @@ type Controller struct {
 	kubeClient    client.Client
 	cloudProvider cloudprovider.CloudProvider
 
-	drift      *Drift
-	expiration *Expiration
-	emptiness  *Emptiness
+	drift         *Drift
+	expiration    *Expiration
+	emptiness     *Emptiness
+	consolidation *Consolidation
 }
 
 // NewController constructs a nodeclaim disruption controller
@@ -64,6 +65,7 @@ func NewController(clk clock.Clock, kubeClient client.Client, cluster *state.Clu
 		drift:      &Drift{cloudProvider: cloudProvider},
 		expiration: &Expiration{kubeClient: kubeClient, clock: clk},
 		emptiness:  &Emptiness{kubeClient: kubeClient, cluster: cluster, clock: clk},
+		consolidation: &Consolidation{kubeClient: kubeClient, cluster: cluster, clock: clk},
 	}
 }
 
@@ -90,6 +92,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim
 		c.expiration,
 		c.drift,
 		c.emptiness,
+		c.consolidation,
 	}
 	for _, reconciler := range reconcilers {
 		res, err := reconciler.Reconcile(ctx, nodePool, nodeClaim)

From 80b94a10f83b9d857d9c0c1facc1b73c96b699b0 Mon Sep 17 00:00:00 2001
From: Luke
Date: Mon, 27 May 2024 17:02:10 +0900
Subject: [PATCH 2/6] update lib

---
 .../nodeclaim/disruption/consolidation.go     | 13 ++++---
 pkg/controllers/provisioning/provisioner.go   | 38 +++++++++++++++++--
 .../provisioning/scheduling/events.go         | 11 ++++++
 pkg/operator/operator.go                      | 12 ++++++
 4 files changed, 64 insertions(+), 10 deletions(-)

diff --git a/pkg/controllers/nodeclaim/disruption/consolidation.go b/pkg/controllers/nodeclaim/disruption/consolidation.go
index f259844aa1..b53930d484 100644
--- a/pkg/controllers/nodeclaim/disruption/consolidation.go
+++ b/pkg/controllers/nodeclaim/disruption/consolidation.go
@@ -1,3 +1,10 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+/*
 Copyright The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
     http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
-package disruption
-
-import (
-	"context"
-	"fmt"
-
 	"github.com/prometheus/client_golang/prometheus"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
diff --git a/pkg/controllers/provisioning/provisioner.go b/pkg/controllers/provisioning/provisioner.go
index 9f7a39c0d9..f50ec8ef8b 100644
--- a/pkg/controllers/provisioning/provisioner.go
+++ b/pkg/controllers/provisioning/provisioner.go
@@ -311,11 +311,18 @@ func (p *Provisioner) Schedule(ctx context.Context) (scheduler.Results, error) {
 		return scheduler.Results{}, err
 	}
 	pods := append(pendingPods, deletingNodePods...)
+	// Filter out pods that were already handled within the last 3 minutes
+	targetPods := lo.FilterMap(pods, func(pod *v1.Pod, _ int) (*v1.Pod, bool) {
+		if p.isPodHandled(ctx, pod) {
+			return nil, false
+		}
+		return pod, true
+	})
 	// nothing to schedule, so just return success
-	if len(pods) == 0 {
+	if len(targetPods) == 0 {
 		return scheduler.Results{}, nil
 	}
-	s, err := p.NewScheduler(ctx, pods, nodes.Active())
+	s, err := p.NewScheduler(ctx, targetPods, nodes.Active())
 	if err != nil {
 		if errors.Is(err, ErrNodePoolsNotFound) {
 			log.FromContext(ctx).Info("no nodepools found")
@@ -323,14 +330,37 @@ func (p *Provisioner) Schedule(ctx context.Context) (scheduler.Results, error) {
 		}
 		return scheduler.Results{}, fmt.Errorf("creating scheduler, %w", err)
 	}
-	results := s.Solve(ctx, pods).TruncateInstanceTypes(scheduler.MaxInstanceTypes)
+	results := s.Solve(ctx, targetPods).TruncateInstanceTypes(scheduler.MaxInstanceTypes)
 	if len(results.NewNodeClaims) > 0 {
-		log.FromContext(ctx).WithValues("Pods", pretty.Slice(lo.Map(pods, func(p *v1.Pod, _ int) string { return klog.KRef(p.Namespace, p.Name).String() }), 5), "duration", time.Since(start)).Info("found provisionable pod(s)")
+		log.FromContext(ctx).WithValues("Pods", pretty.Slice(lo.Map(targetPods, func(p *v1.Pod, _ int) string { return klog.KRef(p.Namespace, p.Name).String() }), 5), "duration", time.Since(start)).Info("found provisionable pod(s)")
 	}
 	results.Record(ctx, p.recorder, p.cluster)
 	return results, nil
 }
 
+func (p *Provisioner) isPodHandled(ctx context.Context, pod *v1.Pod) bool {
+	var events v1.EventList
+	filter := client.MatchingFields{
+		"namespace":           pod.Namespace,
+		"involvedObject.kind": "Pod",
+		"involvedObject.name": pod.Name,
+		"reason":              "HandledByKarpenter",
+	}
+	if err := p.kubeClient.List(ctx, &events, filter); err == nil {
+		for _, event := range events.Items {
+			// Ignore the pod if it was already handled within the last 3 minutes
+			if time.Now().Before(event.LastTimestamp.Time.Add(3 * time.Minute)) {
+				log.FromContext(ctx).Info(fmt.Sprintf("pod %s/%s was already handled", pod.Namespace, pod.Name))
+				return true
+			}
+		}
+	} else {
+		log.FromContext(ctx).Error(err, fmt.Sprintf("failed to list events for %s/%s", pod.Namespace, pod.Name))
+	}
+	p.recorder.Publish(scheduler.PodHandledEvent(pod))
+	return false
+}
+
 func (p *Provisioner) Create(ctx context.Context, n *scheduler.NodeClaim, opts ...functional.Option[LaunchOptions]) (string, error) {
 	ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("NodePool", klog.KRef("", n.NodePoolName)))
 	options := functional.ResolveOptions(opts...)
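
Note: the dedupe in isPodHandled lists events by field selector from the manager's cache, which only works because matching field indexers are registered in pkg/operator/operator.go further down in this patch. A minimal, self-contained sketch of the same lookup follows (the helper name, package, and client wiring are illustrative, not part of the patch):

package provisioning

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// podRecentlyHandled reports whether a "HandledByKarpenter" event fired for the
// pod within the last 3 minutes. The MatchingFields selector requires the
// namespace, involvedObject.kind, involvedObject.name, and reason indexers;
// without them the cache-backed client cannot serve this List.
func podRecentlyHandled(ctx context.Context, c client.Client, pod *v1.Pod) (bool, error) {
	var events v1.EventList
	if err := c.List(ctx, &events, client.MatchingFields{
		"namespace":           pod.Namespace,
		"involvedObject.kind": "Pod",
		"involvedObject.name": pod.Name,
		"reason":              "HandledByKarpenter",
	}); err != nil {
		return false, err
	}
	for _, e := range events.Items {
		if time.Since(e.LastTimestamp.Time) < 3*time.Minute {
			return true, nil
		}
	}
	return false, nil
}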
diff --git a/pkg/controllers/provisioning/scheduling/events.go b/pkg/controllers/provisioning/scheduling/events.go
index 956b424653..d264b7a5e2 100644
--- a/pkg/controllers/provisioning/scheduling/events.go
+++ b/pkg/controllers/provisioning/scheduling/events.go
@@ -59,3 +59,14 @@ func PodFailedToScheduleEvent(pod *v1.Pod, err error) events.Event {
 		DedupeTimeout: 5 * time.Minute,
 	}
 }
+
+func PodHandledEvent(pod *v1.Pod) events.Event {
+	return events.Event{
+		InvolvedObject: pod,
+		Type:           v1.EventTypeNormal,
+		Reason:         "HandledByKarpenter",
+		Message:        "Pod is handled by Karpenter",
+		DedupeValues:   []string{string(pod.UID)},
+		DedupeTimeout:  5 * time.Minute,
+	}
+}
diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go
index 66ab806e59..a9f78925e6 100644
--- a/pkg/operator/operator.go
+++ b/pkg/operator/operator.go
@@ -191,6 +191,18 @@ func NewOperator() (context.Context, *Operator) {
 	lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1.Node{}, "spec.providerID", func(o client.Object) []string {
 		return []string{o.(*v1.Node).Spec.ProviderID}
 	}), "failed to setup node provider id indexer")
+	lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1.Event{}, "involvedObject.kind", func(o client.Object) []string {
+		return []string{o.(*v1.Event).InvolvedObject.Kind}
+	}), "failed to setup event kind indexer")
+	lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1.Event{}, "involvedObject.name", func(o client.Object) []string {
+		return []string{o.(*v1.Event).InvolvedObject.Name}
+	}), "failed to setup event name indexer")
+	lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1.Event{}, "namespace", func(o client.Object) []string {
+		return []string{o.(*v1.Event).Namespace}
+	}), "failed to setup event namespace indexer")
+	lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1.Event{}, "reason", func(o client.Object) []string {
+		return []string{o.(*v1.Event).Reason}
+	}), "failed to setup event reason indexer")
 	lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1beta1.NodeClaim{}, "status.providerID", func(o client.Object) []string {
 		return []string{o.(*v1beta1.NodeClaim).Status.ProviderID}
 	}), "failed to setup nodeclaim provider id indexer")

From 06cff6a66eea0e57f92c5050264e2a1be4a55057 Mon Sep 17 00:00:00 2001
From: Luke
Date: Mon, 27 May 2024 17:03:29 +0900
Subject: [PATCH 3/6] fix lint

---
 pkg/controllers/nodeclaim/disruption/consolidation.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pkg/controllers/nodeclaim/disruption/consolidation.go b/pkg/controllers/nodeclaim/disruption/consolidation.go
index b53930d484..c76ab64096 100644
--- a/pkg/controllers/nodeclaim/disruption/consolidation.go
+++ b/pkg/controllers/nodeclaim/disruption/consolidation.go
@@ -11,7 +11,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-	http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
+
+package disruption
+
+import (
 	"github.com/prometheus/client_golang/prometheus"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"

From 02be7e5fa0a00198db46c0b779934e76ed41b116 Mon Sep 17 00:00:00 2001
From: Luke
Date: Mon, 27 May 2024 17:05:37 +0900
Subject: [PATCH 4/6] fix import

---
 pkg/controllers/nodeclaim/disruption/consolidation.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pkg/controllers/nodeclaim/disruption/consolidation.go b/pkg/controllers/nodeclaim/disruption/consolidation.go
index c76ab64096..cc3b901322 100644
--- a/pkg/controllers/nodeclaim/disruption/consolidation.go
+++ b/pkg/controllers/nodeclaim/disruption/consolidation.go
@@ -23,10 +23,14 @@ limitations under the License.
 package disruption
 
 import (
+	"context"
+	"fmt"
+
 	"github.com/prometheus/client_golang/prometheus"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/utils/clock"
+	"knative.dev/pkg/apis"
 	"knative.dev/pkg/logging"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

From 0d9dd6803ab92c13eade4c7960910bce9b2e4c26 Mon Sep 17 00:00:00 2001
From: Luke
Date: Mon, 27 May 2024 17:06:04 +0900
Subject: [PATCH 5/6] fix lint

---
 pkg/controllers/nodeclaim/disruption/consolidation.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pkg/controllers/nodeclaim/disruption/consolidation.go b/pkg/controllers/nodeclaim/disruption/consolidation.go
index cc3b901322..f4e84219fb 100644
--- a/pkg/controllers/nodeclaim/disruption/consolidation.go
+++ b/pkg/controllers/nodeclaim/disruption/consolidation.go
@@ -30,7 +30,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/utils/clock"
-	"knative.dev/pkg/apis"
 	"knative.dev/pkg/logging"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

From 79843bcdb6a3d8e98718b43648420d55a3031c46 Mon Sep 17 00:00:00 2001
From: Luke
Date: Mon, 27 May 2024 17:07:39 +0900
Subject: [PATCH 6/6] fix lint

---
 pkg/controllers/nodeclaim/disruption/consolidation.go | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/pkg/controllers/nodeclaim/disruption/consolidation.go b/pkg/controllers/nodeclaim/disruption/consolidation.go
index f4e84219fb..fc00a66027 100644
--- a/pkg/controllers/nodeclaim/disruption/consolidation.go
+++ b/pkg/controllers/nodeclaim/disruption/consolidation.go
@@ -1,17 +1,11 @@
 /*
 Copyright The Kubernetes Authors.
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-/*
-Copyright The Kubernetes Authors.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-	http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
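
Taken together, the series is meant to allow NodePools that consolidate on a utilization threshold. A minimal sketch of the intended configuration against this fork's v1beta1 types follows (the name and values are illustrative, not taken from the patches):

package main

import (
	"fmt"
	"time"

	"github.com/samber/lo"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
)

func main() {
	nodePool := &v1beta1.NodePool{
		ObjectMeta: metav1.ObjectMeta{Name: "underutilized-example"}, // illustrative name
		Spec: v1beta1.NodePoolSpec{
			Disruption: v1beta1.Disruption{
				ConsolidationPolicy: v1beta1.ConsolidationPolicyWhenUnderutilized,
				// A node only becomes a candidate once both its CPU and memory
				// requests fall below 50% of allocatable (see calculateUtilizationOfResource).
				UtilizationThreshold: lo.ToPtr(50),
				// With the CEL restriction removed in PATCH 1, consolidateAfter acts as a
				// grace period measured from the Underutilized condition's transition time.
				ConsolidateAfter: &v1beta1.NillableDuration{Duration: lo.ToPtr(10 * time.Minute)},
			},
		},
	}
	fmt.Println(nodePool.Name)
}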