diff --git a/pkg/scheduler/frameworkext/reservation_info.go b/pkg/scheduler/frameworkext/reservation_info.go
index 79d8d681b..2eca66baf 100644
--- a/pkg/scheduler/frameworkext/reservation_info.go
+++ b/pkg/scheduler/frameworkext/reservation_info.go
@@ -40,6 +40,7 @@ type ReservationInfo struct {
 	ResourceNames    []corev1.ResourceName
 	Allocatable      corev1.ResourceList
 	Allocated        corev1.ResourceList
+	Reserved         corev1.ResourceList // reserved inside the reservation
 	AllocatablePorts framework.HostPortInfo
 	AllocatedPorts   framework.HostPortInfo
 	AssignedPods     map[types.UID]*PodRequirement
@@ -80,6 +81,7 @@ func (p *PodRequirement) Clone() *PodRequirement {
 func NewReservationInfo(r *schedulingv1alpha1.Reservation) *ReservationInfo {
 	var parseErrors []error
 	allocatable := reservationutil.ReservationRequests(r)
+	reserved := util.GetNodeReservationFromAnnotation(r.Annotations)
 	resourceNames := quotav1.ResourceNames(allocatable)
 	if r.Spec.AllocatePolicy == schedulingv1alpha1.ReservationAllocatePolicyRestricted {
 		options, err := apiext.GetReservationRestrictedOptions(r.Annotations)
@@ -107,6 +109,7 @@ func NewReservationInfo(r *schedulingv1alpha1.Reservation) *ReservationInfo {
 		Pod:              reservedPod,
 		ResourceNames:    resourceNames,
 		Allocatable:      allocatable,
+		Reserved:         reserved,
 		AllocatablePorts: util.RequestedHostPorts(reservedPod),
 		AssignedPods:     map[types.UID]*PodRequirement{},
 		OwnerMatchers:    ownerMatchers,
@@ -118,6 +121,7 @@ func NewReservationInfoFromPod(pod *corev1.Pod) *ReservationInfo {
 	var parseErrors []error
 
 	allocatable := resource.PodRequests(pod, resource.PodResourcesOptions{})
+	reserved := util.GetNodeReservationFromAnnotation(pod.Annotations)
 	resourceNames := quotav1.ResourceNames(allocatable)
 	options, err := apiext.GetReservationRestrictedOptions(pod.Annotations)
 	if err == nil {
@@ -148,6 +152,7 @@ func NewReservationInfoFromPod(pod *corev1.Pod) *ReservationInfo {
 		Pod:              pod,
 		ResourceNames:    resourceNames,
 		Allocatable:      allocatable,
+		Reserved:         reserved,
 		AllocatablePorts: util.RequestedHostPorts(pod),
 		AssignedPods:     map[types.UID]*PodRequirement{},
 		OwnerMatchers:    ownerMatchers,
@@ -344,6 +349,7 @@ func (ri *ReservationInfo) Clone() *ReservationInfo {
 		ResourceNames:    resourceNames,
 		Allocatable:      ri.Allocatable.DeepCopy(),
 		Allocated:        ri.Allocated.DeepCopy(),
+		Reserved:         ri.Reserved.DeepCopy(),
 		AllocatablePorts: util.CloneHostPorts(ri.AllocatablePorts),
 		AllocatedPorts:   util.CloneHostPorts(ri.AllocatedPorts),
 		AssignedPods:     assignedPods,
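`ReservationInfo` now carries a third resource list, `Reserved`, parsed once at construction time from the reservation's (or reservation-operating pod's) annotations and deep-copied on `Clone`. As a rough sketch of the parsing that `util.GetNodeReservationFromAnnotation` performs on the payload the tests use below, assuming `apiext.AnnotationNodeReservation` resolves to the `node.koordinator.sh/reservation` key and modeling only the `resources` field (the real helper lives in `pkg/util` and may handle more fields and error reporting):

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Assumed: apiext.AnnotationNodeReservation resolves to this key.
const annotationNodeReservation = "node.koordinator.sh/reservation"

// nodeReservation models only the "resources" field the tests exercise;
// the real annotation payload may carry additional fields.
type nodeReservation struct {
	Resources corev1.ResourceList `json:"resources,omitempty"`
}

// getNodeReservation is a simplified stand-in for
// util.GetNodeReservationFromAnnotation: it returns the resources reserved
// inside the reservation, or nil when the annotation is absent or malformed.
func getNodeReservation(annotations map[string]string) corev1.ResourceList {
	raw, ok := annotations[annotationNodeReservation]
	if !ok {
		return nil
	}
	var nr nodeReservation
	if err := json.Unmarshal([]byte(raw), &nr); err != nil {
		return nil
	}
	return nr.Resources
}

func main() {
	fmt.Println(getNodeReservation(map[string]string{
		annotationNodeReservation: `{"resources": {"cpu": "1"}}`,
	})) // map[cpu:1]
}
```

With `{"resources": {"cpu": "1"}}`, one CPU of the reservation is held back and never handed out to allocating pods.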
diff --git a/pkg/scheduler/plugins/reservation/plugin.go b/pkg/scheduler/plugins/reservation/plugin.go
index 857dd489a..936698b41 100644
--- a/pkg/scheduler/plugins/reservation/plugin.go
+++ b/pkg/scheduler/plugins/reservation/plugin.go
@@ -562,7 +562,8 @@ func fitsNode(podRequest *framework.Resource, nodeInfo *framework.NodeInfo, node
 
 	var rRemained *framework.Resource
 	if rInfo != nil {
-		resources := quotav1.Subtract(rInfo.Allocatable, rInfo.Allocated)
+		// Reservation available = Allocatable - Allocated - InnerReserved
+		resources := quotav1.Subtract(quotav1.Subtract(rInfo.Allocatable, rInfo.Allocated), rInfo.Reserved)
 		rRemained = framework.NewResource(resources)
 	} else {
 		rRemained = dummyResource
@@ -603,9 +604,10 @@ func fitsReservation(podRequest corev1.ResourceList, rInfo *frameworkext.Reserva
 	if len(preemptibleInRR) > 0 {
 		allocated = quotav1.SubtractWithNonNegativeResult(allocated, preemptibleInRR)
 	}
+	allocatable := rInfo.Allocatable
 	allocated = quotav1.Mask(allocated, rInfo.ResourceNames)
+	reserved := quotav1.Mask(rInfo.Reserved, rInfo.ResourceNames)
 	requests := quotav1.Mask(podRequest, rInfo.ResourceNames)
-	allocatable := rInfo.Allocatable
 
 	var insufficientResourceReasons []string
 
@@ -613,7 +615,7 @@ func fitsReservation(podRequest corev1.ResourceList, rInfo *frameworkext.Reserva
 	if maxPods, found := allocatable[corev1.ResourcePods]; found {
 		allocatedPods := rInfo.GetAllocatedPods()
 		if preemptiblePodsInRR, found := preemptibleInRR[corev1.ResourcePods]; found {
-			allocatedPods += int(preemptiblePodsInRR.Value()) // assert no overflow
+			allocatedPods -= int(preemptiblePodsInRR.Value()) // assert no overflow
 		}
 		if int64(allocatedPods)+1 > maxPods.Value() {
 			if !isDetailed {
@@ -640,6 +642,11 @@ func fitsReservation(podRequest corev1.ResourceList, rInfo *frameworkext.Reserva
 		if !found {
 			used = *resource.NewQuantity(0, resource.DecimalSI)
 		}
+		reservedQ, found := reserved[resourceName]
+		if found {
+			// NOTE: capacity excludes the reserved resource
+			capacity.Sub(reservedQ)
+		}
 		remained := capacity.DeepCopy()
 		remained.Sub(used)
 
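Both call sites apply the same arithmetic: resources reserved inside the reservation are carved out of its capacity before any pod is fitted, i.e. available = Allocatable - Allocated - Reserved. A minimal standalone sketch with the quota helpers the plugin already imports, using values borrowed from the updated test fixture:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	quotav1 "k8s.io/apiserver/pkg/quota/v1"
)

func main() {
	// Values borrowed from the updated fixture: the reservation allocates
	// 7 CPUs, 1 CPU is already allocated to a pod, and 1 CPU is reserved
	// inside the reservation via the annotation.
	allocatable := corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("7")}
	allocated := corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}
	reserved := corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}

	// Reservation available = Allocatable - Allocated - InnerReserved,
	// the same expression fitsNode now evaluates.
	available := quotav1.Subtract(quotav1.Subtract(allocatable, allocated), reserved)

	cpu := available[corev1.ResourceCPU]
	fmt.Println(cpu.String()) // 5
}
```

The pods accounting flips sign for the same reason: `preemptibleInRR` quantities are now stored as positive values (see the `-1` to `1` test updates below), so preemptible pods are subtracted from the allocated count rather than added.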
diff --git a/pkg/scheduler/plugins/reservation/plugin_test.go b/pkg/scheduler/plugins/reservation/plugin_test.go
index 8a4dc047b..4fbf18943 100644
--- a/pkg/scheduler/plugins/reservation/plugin_test.go
+++ b/pkg/scheduler/plugins/reservation/plugin_test.go
@@ -788,6 +788,9 @@ func Test_filterWithReservations(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-r",
 			UID:  "123456",
+			Annotations: map[string]string{
+				apiext.AnnotationNodeReservation: `{"resources": {"cpu": "1"}}`,
+			},
 		},
 		Spec: schedulingv1alpha1.ReservationSpec{
 			AllocatePolicy: schedulingv1alpha1.ReservationAllocatePolicyRestricted,
@@ -797,7 +800,7 @@
 				{
 					Resources: corev1.ResourceRequirements{
 						Requests: corev1.ResourceList{
-							corev1.ResourceCPU:  resource.MustParse("6"),
+							corev1.ResourceCPU:  resource.MustParse("7"),
 							corev1.ResourcePods: resource.MustParse("2"),
 						},
 					},
@@ -808,7 +811,7 @@
 		},
 		Status: schedulingv1alpha1.ReservationStatus{
 			Allocatable: corev1.ResourceList{
-				corev1.ResourceCPU:  resource.MustParse("6"),
+				corev1.ResourceCPU:  resource.MustParse("7"),
 				corev1.ResourcePods: resource.MustParse("2"),
 			},
 		},
@@ -1010,6 +1013,53 @@
 			},
 			wantStatus: nil,
 		},
+		{
+			name: "filter restricted reservation with affinity",
+			stateData: &stateData{
+				schedulingStateData: schedulingStateData{
+					hasAffinity: true,
+					podRequests: corev1.ResourceList{
+						corev1.ResourceCPU:    resource.MustParse("6"),
+						corev1.ResourceMemory: resource.MustParse("8Gi"),
+					},
+					nodeReservationStates: map[string]*nodeReservationState{
+						node.Name: {
+							podRequested: &framework.Resource{
+								MilliCPU: 30 * 1000,
+								Memory:   24 * 1024 * 1024 * 1024,
+							},
+							rAllocated: &framework.Resource{
+								MilliCPU: 0,
+							},
+							matchedOrIgnored: []*frameworkext.ReservationInfo{
+								frameworkext.NewReservationInfo(&schedulingv1alpha1.Reservation{
+									ObjectMeta: metav1.ObjectMeta{
+										Name: "test-r",
+									},
+									Spec: schedulingv1alpha1.ReservationSpec{
+										AllocatePolicy: schedulingv1alpha1.ReservationAllocatePolicyRestricted,
+										Template: &corev1.PodTemplateSpec{
+											Spec: corev1.PodSpec{
+												Containers: []corev1.Container{
+													{
+														Resources: corev1.ResourceRequirements{
+															Requests: corev1.ResourceList{
+																corev1.ResourceCPU: resource.MustParse("6"),
+															},
+														},
+													},
+												},
+											},
+										},
+									},
+								}),
+							},
+						},
+					},
+				},
+			},
+			wantStatus: nil,
+		},
 		{
 			name: "filter restricted reservation with nodeInfo and matched requests are zero",
 			stateData: &stateData{
@@ -1237,6 +1287,56 @@ func Test_filterWithReservations(t *testing.T) {
 			},
 			wantStatus: nil,
 		},
+		{
+			name: "failed to filter restricted reservation due to reserved",
+			stateData: &stateData{
+				schedulingStateData: schedulingStateData{
+					hasAffinity: true,
+					podRequests: corev1.ResourceList{
+						corev1.ResourceCPU:    resource.MustParse("6"),
+						corev1.ResourceMemory: resource.MustParse("8Gi"),
+					},
+					nodeReservationStates: map[string]*nodeReservationState{
+						node.Name: {
+							podRequested: &framework.Resource{
+								MilliCPU: 30 * 1000,
+								Memory:   24 * 1024 * 1024 * 1024,
+							},
+							rAllocated: &framework.Resource{
+								MilliCPU: 0,
+							},
+							matchedOrIgnored: []*frameworkext.ReservationInfo{
+								frameworkext.NewReservationInfo(&schedulingv1alpha1.Reservation{
+									ObjectMeta: metav1.ObjectMeta{
+										Name: "test-r",
+										Annotations: map[string]string{
+											apiext.AnnotationNodeReservation: `{"resources": {"cpu": "2"}}`,
+										},
+									},
+									Spec: schedulingv1alpha1.ReservationSpec{
+										AllocatePolicy: schedulingv1alpha1.ReservationAllocatePolicyRestricted,
+										Template: &corev1.PodTemplateSpec{
+											Spec: corev1.PodSpec{
+												Containers: []corev1.Container{
+													{
+														Resources: corev1.ResourceRequirements{
+															Requests: corev1.ResourceList{
+																corev1.ResourceCPU: resource.MustParse("6"),
+															},
+														},
+													},
+												},
+											},
+										},
+									},
+								}),
+							},
+						},
+					},
+				},
+			},
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient cpu"),
+		},
 		{
 			name: "filter default reservations with preemption",
 			stateData: &stateData{
@@ -1853,8 +1953,8 @@
 			preemptibleInRRs: map[string]map[types.UID]corev1.ResourceList{
 				node.Name: {
 					"123456": {
-						corev1.ResourceCPU:  resource.MustParse("-1"),
-						corev1.ResourcePods: resource.MustParse("-1"),
+						corev1.ResourceCPU:  resource.MustParse("1"),
+						corev1.ResourcePods: resource.MustParse("1"),
 					},
 				},
 			},
@@ -1904,6 +2004,42 @@
 			wantStatus: framework.NewStatus(framework.Unschedulable, "Reservation(s) Too many pods, "+
 				"requested: 1, used: 2, capacity: 2"),
 		},
+		{
+			name: "failed to filter restricted reservation with name and reserved since insufficient resource",
+			stateData: &stateData{
+				schedulingStateData: schedulingStateData{
+					hasAffinity:     true,
+					reservationName: "test-r",
+					podRequests: corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("6"),
+					},
+					preemptibleInRRs: map[string]map[types.UID]corev1.ResourceList{
+						node.Name: {
+							"123456": {
+								corev1.ResourceCPU:  resource.MustParse("1"),
+								corev1.ResourcePods: resource.MustParse("1"),
+							},
+						},
+					},
+					nodeReservationStates: map[string]*nodeReservationState{
+						node.Name: {
+							podRequested: &framework.Resource{
+								MilliCPU: 30 * 1000,
+								Memory:   24 * 1024 * 1024 * 1024,
+							},
+							rAllocated: &framework.Resource{
+								MilliCPU: 2000,
+							},
+							matchedOrIgnored: []*frameworkext.ReservationInfo{
+								testRInfo.Clone(),
+							},
+						},
+					},
+				},
+			},
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient cpu, "+
+				"requested: 6000, used: 1000, capacity: 6000"),
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
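The expected status in the last case can be checked by hand against `fitsReservation`'s bookkeeping: capacity = 7 (allocatable) - 1 (inner reserved) = 6 CPUs, used = 2 (rAllocated) - 1 (preemptible in the reservation) = 1 CPU, and the pod requests 6 CPUs, so used + requested exceeds capacity and the filter fails. A small hypothetical snippet reproducing the numbers in the error message:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	capacity := resource.MustParse("7")   // reservation allocatable cpu
	capacity.Sub(resource.MustParse("1")) // minus the inner reserved cpu -> 6

	used := resource.MustParse("2")   // rAllocated cpu
	used.Sub(resource.MustParse("1")) // minus preemptible-in-RR cpu -> 1

	requested := resource.MustParse("6")

	remained := capacity.DeepCopy()
	remained.Sub(used) // 6 - 1 = 5, which cannot hold the requested 6

	fmt.Printf("requested: %d, used: %d, capacity: %d\n",
		requested.MilliValue(), used.MilliValue(), capacity.MilliValue())
	// Output: requested: 6000, used: 1000, capacity: 6000
}
```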