descheduler: Add detail logs for nonRemovable pods #2000

Merged 1 commit on Apr 29, 2024
@@ -124,10 +124,12 @@ func (a *arbitratorImpl) Start(ctx context.Context) error {
 // Filter checks if a pod can be evicted
 func (a *arbitratorImpl) Filter(pod *corev1.Pod) bool {
     if !a.filter.filterExistingPodMigrationJob(pod) {
+        klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", "filterExistingPodMigrationJob")
         return false
     }

     if !a.filter.reservationFilter(pod) {
+        klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", "reservationFilter")
         return false
     }

pkg/descheduler/controllers/migration/arbitrator/filter.go (12 additions & 9 deletions)
@@ -251,8 +251,8 @@ func (f *filter) filterMaxMigratingPerNode(pod *corev1.Pod) bool {
     maxMigratingPerNode := int(*f.args.MaxMigratingPerNode)
     exceeded := count >= maxMigratingPerNode
     if exceeded {
-        klog.V(4).Infof("Pod %q fails to check maxMigratingPerNode because the Node %q has %d migrating Pods, exceeding the maxMigratingPerNode(%d)",
-            klog.KObj(pod), pod.Spec.NodeName, count, maxMigratingPerNode)
+        klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod),
+            "checks", "maxMigratingPerNode", "node", pod.Spec.NodeName, "count", count, "maxMigratingPerNode", maxMigratingPerNode)
     }
     return !exceeded
 }
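Note on the pattern above: the PR replaces printf-style klog.Infof calls with structured klog.V(4).InfoS calls that keep a fixed message and move the variable parts into key/value pairs. A minimal, self-contained sketch of the difference; the pod name, node, and counts below are made up for illustration, not taken from the PR:

package main

import (
    "flag"

    "k8s.io/klog/v2"
)

func main() {
    klog.InitFlags(nil)
    _ = flag.Set("v", "4") // enable verbosity 4 so both lines below are emitted
    flag.Parse()
    defer klog.Flush()

    // Old style: one formatted string, harder for log backends to parse.
    klog.V(4).Infof("Pod %q fails to check maxMigratingPerNode because the Node %q has %d migrating Pods, exceeding the maxMigratingPerNode(%d)",
        "default/web-0", "node-1", 3, 2)

    // New style: fixed message plus key/value pairs ("pod", "checks", "node", ...),
    // matching the shape of the log lines added in this PR.
    klog.V(4).InfoS("Pod fails the following checks", "pod", "default/web-0",
        "checks", "maxMigratingPerNode", "node", "node-1", "count", 3, "maxMigratingPerNode", 2)
}

Either line is only emitted when the process runs with verbosity 4 or higher.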
@@ -282,8 +282,8 @@ func (f *filter) filterMaxMigratingPerNamespace(pod *corev1.Pod) bool {
     maxMigratingPerNamespace := int(*f.args.MaxMigratingPerNamespace)
     exceeded := count >= maxMigratingPerNamespace
     if exceeded {
-        klog.V(4).Infof("Pod %q fails to check maxMigratingPerNamespace because the Namespace %q has %d migrating Pods, exceeding the maxMigratingPerNamespace(%d)",
-            klog.KObj(pod), pod.Namespace, count, maxMigratingPerNamespace)
+        klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod),
+            "checks", "maxMigratingPerNamespace", "namespace", pod.Namespace, "count", count, "maxMigratingPerNamespace", maxMigratingPerNamespace)
     }
     return !exceeded
 }
@@ -342,8 +342,9 @@ func (f *filter) filterMaxMigratingOrUnavailablePerWorkload(pod *corev1.Pod) boo
     if len(migratingPods) > 0 {
         exceeded := len(migratingPods) >= maxMigrating
         if exceeded {
-            klog.V(4).Infof("The workload %s/%s/%s(%s) of Pod %q has %d migration jobs that exceed MaxMigratingPerWorkload %d",
-                ownerRef.Name, ownerRef.Kind, ownerRef.APIVersion, ownerRef.UID, klog.KObj(pod), len(migratingPods), maxMigrating)
+            klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod),
+                "checks", "maxMigratingPerWorkload", "owner", fmt.Sprintf("%s/%s/%s(%s)", ownerRef.Name, ownerRef.Kind, ownerRef.APIVersion, ownerRef.UID),
+                "migratingPods", len(migratingPods), "maxMigratingPerWorkload", maxMigrating)
             return false
         }
     }
@@ -383,8 +384,9 @@ func (f *filter) filterExpectedReplicas(pod *corev1.Pod) bool {
     if f.args.SkipCheckExpectedReplicas == nil || !*f.args.SkipCheckExpectedReplicas {
         // TODO(joseph): There are f few special scenarios where should we allow eviction?
         if expectedReplicas == 1 || int(expectedReplicas) == maxMigrating || int(expectedReplicas) == maxUnavailable {
-            klog.Warningf("maxMigrating(%d) or maxUnavailable(%d) equals to the replicas(%d) of the workload %s/%s/%s(%s) of Pod %q, or the replicas equals to 1, please increase the replicas or update the defense configurations",
-                maxMigrating, maxUnavailable, expectedReplicas, ownerRef.Name, ownerRef.Kind, ownerRef.APIVersion, ownerRef.UID, klog.KObj(pod))
+            klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", "expectedReplicas",
+                "owner", fmt.Sprintf("%s/%s/%s(%s)", ownerRef.Name, ownerRef.Kind, ownerRef.APIVersion, ownerRef.UID),
+                "maxMigrating", maxMigrating, "maxUnavailable", maxUnavailable, "expectedReplicas", expectedReplicas)
             return false
         }
     }
@@ -470,7 +472,8 @@ func (f *filter) filterLimitedObject(pod *corev1.Pod) bool {
     defer f.limiterLock.Unlock()
     if limiter := f.objectLimiters[ownerRef.UID]; limiter != nil {
         if remainTokens := limiter.Tokens() - float64(1); remainTokens < 0 {
-            klog.Infof("Pod %q is filtered by workload %s/%s/%s is limited", klog.KObj(pod), ownerRef.Name, ownerRef.Kind, ownerRef.APIVersion)
+            klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", "limitedObject",
+                "owner", fmt.Sprintf("%s/%s/%s", ownerRef.Name, ownerRef.Kind, ownerRef.APIVersion))
             return false
         }
     }
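filterLimitedObject above consults a per-workload token bucket via limiter.Tokens(). A rough sketch of that check with golang.org/x/time/rate, assuming the object limiters are *rate.Limiter values (which the Tokens() call suggests); the limit, burst, and loop below are illustrative, not koordinator's actual configuration:

package main

import (
    "fmt"

    "golang.org/x/time/rate"
)

func main() {
    // Made-up policy: roughly one eviction per 10s per workload, burst of 1.
    limiter := rate.NewLimiter(rate.Limit(0.1), 1)

    for i := 0; i < 3; i++ {
        // Same shape as the check in filterLimitedObject: if taking one more
        // token would drive the bucket negative, the workload is rate limited.
        if remainTokens := limiter.Tokens() - float64(1); remainTokens < 0 {
            fmt.Println("workload is limited, pod treated as non-removable")
            continue
        }
        limiter.Allow() // consume a token for this eviction attempt
        fmt.Println("eviction allowed")
    }
}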
pkg/descheduler/controllers/migration/util/util.go (4 additions & 0 deletions)
@@ -22,6 +22,7 @@ import (
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
+    "k8s.io/klog/v2"

     "github.com/koordinator-sh/koordinator/apis/extension"
     sev1alpha1 "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1"
@@ -115,5 +116,8 @@ func GetMaxMigrating(replicas int, intOrPercent *intstr.IntOrString) (int, error
 // FilterPodWithMaxEvictionCost rejects if pod's eviction cost is math.MaxInt32
 func FilterPodWithMaxEvictionCost(pod *corev1.Pod) bool {
     cost, _ := extension.GetEvictionCost(pod.Annotations)
+    if cost == math.MaxInt32 {
+        klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", "podWithMaxEvictionCost")
+    }
     return !(cost == math.MaxInt32)
 }
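The hunk above sits next to GetMaxMigrating, which resolves a configured int-or-percent limit against a workload's replica count; the maxMigrating values logged in filter.go come from this kind of resolution. A minimal sketch using the generic apimachinery helper; the percentage, replica count, and rounding choice are illustrative, and koordinator's own implementation may differ:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
    // Resolve "20%" of 10 replicas, rounding up.
    maxMigrating := intstr.FromString("20%")
    v, err := intstr.GetScaledValueFromIntOrPercent(&maxMigrating, 10, true)
    if err != nil {
        panic(err)
    }
    fmt.Println(v) // 2
}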
pkg/descheduler/node/node.go (1 addition & 1 deletion)
@@ -167,7 +167,7 @@ func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *corev1.P
         klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
         return true
     } else {
-        klog.V(5).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node), "errors", utilerrors.NewAggregate(errors))
+        klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node), "errors", utilerrors.NewAggregate(errors))
     }
 }
 return false
pkg/descheduler/pod/pods.go (4 additions & 0 deletions)
@@ -24,6 +24,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/sets"
+    "k8s.io/klog/v2"
     qoshelper "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"

     "github.com/koordinator-sh/koordinator/pkg/descheduler/framework"
@@ -100,12 +101,15 @@ func (o *Options) BuildFilterFunc() (FilterFunc, error) {
         return false
     }
     if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
+        klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", "includedNamespaces")
         return false
     }
     if len(o.excludedNamespaces) > 0 && o.excludedNamespaces.Has(pod.Namespace) {
+        klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", "excludedNamespaces")
         return false
     }
     if s != nil && !s.Matches(labels.Set(pod.GetLabels())) {
+        klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", "labelSelector")
        return false
     }
     return true
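BuildFilterFunc above chains namespace include/exclude sets and a label selector; the PR only adds a V(4) log at each rejection point. A standalone sketch of how those checks compose; the function name, arguments, and sample pod are illustrative, not part of the koordinator API:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/sets"
)

// keepPod mirrors the namespace and label-selector checks in BuildFilterFunc.
func keepPod(pod *corev1.Pod, included, excluded sets.String, selector labels.Selector) bool {
    if included.Len() > 0 && !included.Has(pod.Namespace) {
        return false // not in the included namespaces
    }
    if excluded.Len() > 0 && excluded.Has(pod.Namespace) {
        return false // explicitly excluded namespace
    }
    if selector != nil && !selector.Matches(labels.Set(pod.GetLabels())) {
        return false // labels do not match the selector
    }
    return true
}

func main() {
    pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
        Name:      "web-0",
        Namespace: "default",
        Labels:    map[string]string{"app": "web"},
    }}
    selector := labels.SelectorFromSet(labels.Set{"app": "web"})

    fmt.Println(keepPod(pod, sets.NewString("default"), sets.NewString("kube-system"), selector)) // true
    fmt.Println(keepPod(pod, sets.NewString("prod"), sets.NewString(), selector))                 // false: namespace not included
}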