diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go
index e0f070e610a9..fd4754039707 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
@@ -29,16 +30,20 @@ import (
 
 // CSIMaxVolumeLimitChecker defines predicate needed for counting CSI volumes
 type CSIMaxVolumeLimitChecker struct {
-	pvInfo  PersistentVolumeInfo
-	pvcInfo PersistentVolumeClaimInfo
+	pvInfo               PersistentVolumeInfo
+	pvcInfo              PersistentVolumeClaimInfo
+	scInfo               StorageClassInfo
+	randomVolumeIDPrefix string
 }
 
 // NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
 func NewCSIMaxVolumeLimitPredicate(
-	pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate {
+	pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, scInfo StorageClassInfo) FitPredicate {
 	c := &CSIMaxVolumeLimitChecker{
-		pvInfo:  pvInfo,
-		pvcInfo: pvcInfo,
+		pvInfo:               pvInfo,
+		pvcInfo:              pvcInfo,
+		scInfo:               scInfo,
+		randomVolumeIDPrefix: rand.String(32),
 	}
 	return c.attachableLimitPredicate
 }
@@ -129,28 +134,70 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
 			continue
 		}
 
-		pvName := pvc.Spec.VolumeName
-		// TODO - the actual handling of unbound PVCs will be fixed by late binding design.
-		if pvName == "" {
-			klog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
+		driverName, volumeHandle := c.getCSIDriver(pvc)
+		// If we can't find the driver name or volume handle, we don't count this volume.
+		if driverName == "" || volumeHandle == "" {
 			continue
 		}
-		pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
-
-		if err != nil {
-			klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
-			continue
-		}
-
-		csiSource := pv.Spec.PersistentVolumeSource.CSI
-		if csiSource == nil {
-			klog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
-			continue
-		}
-		driverName := csiSource.Driver
 		volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
-		result[csiSource.VolumeHandle] = volumeLimitKey
+		result[volumeHandle] = volumeLimitKey
 	}
 	return nil
 }
+
+func (c *CSIMaxVolumeLimitChecker) getCSIDriver(pvc *v1.PersistentVolumeClaim) (string, string) {
+	pvName := pvc.Spec.VolumeName
+	namespace := pvc.Namespace
+	pvcName := pvc.Name
+
+	placeHolderCSIDriver := ""
+	placeHolderHandle := ""
+	if pvName == "" {
+		klog.V(5).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
+		return c.getDriverNameFromSC(pvc)
+	}
+	pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
+
+	if err != nil {
+		klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
+		// If we can't fetch the PV associated with the PVC, maybe it got deleted
+		// or the PVC was prebound to a PV that hasn't been created yet.
+		// Fall back to using the StorageClass for volume counting.
+		return c.getDriverNameFromSC(pvc)
+	}
+
+	csiSource := pv.Spec.PersistentVolumeSource.CSI
+	if csiSource == nil {
+		klog.V(5).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
+		return placeHolderCSIDriver, placeHolderHandle
+	}
+	return csiSource.Driver, csiSource.VolumeHandle
+}
+
+func (c *CSIMaxVolumeLimitChecker) getDriverNameFromSC(pvc *v1.PersistentVolumeClaim) (string, string) {
+	namespace := pvc.Namespace
+	pvcName := pvc.Name
+	scName := pvc.Spec.StorageClassName
+
+	placeHolderCSIDriver := ""
+	placeHolderHandle := ""
+	if scName == nil {
+		// If no StorageClass is set, the PVC must be using immediate binding mode
+		// and hence it must be bound before scheduling, so it is safe not to count it.
+		klog.V(5).Infof("pvc %s/%s has no storageClass", namespace, pvcName)
+		return placeHolderCSIDriver, placeHolderHandle
+	}
+
+	storageClass, err := c.scInfo.GetStorageClassInfo(*scName)
+	if err != nil {
+		klog.V(5).Infof("no storageClass %s found for pvc %s/%s", *scName, namespace, pvcName)
+		return placeHolderCSIDriver, placeHolderHandle
+	}
+
+	// We use a random prefix to avoid conflicts with real volume IDs. If the PVC gets bound
+	// in the middle of predicate evaluation and another pod on the same node uses the same
+	// volume, we will overcount it, treating the bound and unbound references as different volumes.
+	volumeHandle := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)
+	return storageClass.Provisioner, volumeHandle
+}
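The counting trick above is easiest to see in isolation. Below is a minimal, self-contained sketch of the property the random prefix provides; it is not the vendored code, and `pseudoHandle`, the literal prefix, and the limit-key string are illustrative stand-ins. Any number of pods on a node referencing the same unbound PVC map to one synthetic handle, so the claim is counted once, while the prefix keeps that handle from colliding with a real CSI volume handle.

```go
package main

import "fmt"

// pseudoHandle mirrors the synthetic-handle scheme: a per-checker random
// prefix plus the claim's namespace/name. Every pod that references the
// same unbound PVC yields the same key, so the claim is counted once.
func pseudoHandle(prefix, namespace, pvcName string) string {
	return fmt.Sprintf("%s-%s/%s", prefix, namespace, pvcName)
}

func main() {
	const prefix = "0123456789abcdef" // stand-in for rand.String(32)

	// volume handle -> volume limit key, as in filterAttachableVolumes.
	result := map[string]string{}

	// Two pods on the same node sharing the unbound PVC "default/csi-4".
	for i := 0; i < 2; i++ {
		result[pseudoHandle(prefix, "default", "csi-4")] = "attachable-volumes-csi-ebs"
	}

	fmt.Println(len(result)) // 1: the shared pending claim is not double-counted
}
```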
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate_test.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate_test.go
index d89da3815577..3cc4d8dbc542 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package predicates
 
 import (
+	"fmt"
 	"reflect"
 	"testing"
 
@@ -35,7 +36,7 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 			{
 				VolumeSource: v1.VolumeSource{
 					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-						ClaimName: "csi-ebs",
+						ClaimName: "csi-ebs-0",
 					},
 				},
 			},
@@ -77,38 +78,180 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 		},
 	}
 
+	pendingVolumePod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-4",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// A different pod from pendingVolumePod, but using the same unbound PVC
+	unboundPVCPod2 := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-4",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	missingPVPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-6",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	noSCPVCPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-5",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	gceTwoVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-gce-1",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-gce-2",
+						},
+					},
+				},
+			},
+		},
+	}
+
 	tests := []struct {
 		newPod       *v1.Pod
 		existingPods []*v1.Pod
 		filterName   string
 		maxVols      int
+		driverNames  []string
 		fits         bool
 		test         string
 	}{
 		{
 			newPod:       oneVolPod,
 			existingPods: []*v1.Pod{runningPod, twoVolPod},
-			filterName:   "csi-ebs",
+			filterName:   "csi",
 			maxVols:      4,
+			driverNames:  []string{"ebs"},
 			fits:         true,
 			test:         "fits when node capacity >= new pod's CSI volumes",
 		},
 		{
 			newPod:       oneVolPod,
 			existingPods: []*v1.Pod{runningPod, twoVolPod},
-			filterName:   "csi-ebs",
+			filterName:   "csi",
 			maxVols:      2,
+			driverNames:  []string{"ebs"},
 			fits:         false,
 			test:         "doesn't fit when node capacity <= pods' CSI volumes",
 		},
+		// should count pending PVCs
+		{
+			newPod:       oneVolPod,
+			existingPods: []*v1.Pod{pendingVolumePod, twoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{"ebs"},
+			fits:         false,
+			test:         "count pending PVCs towards capacity",
+		},
+		// two pods using the same pending PVC should be counted as one volume
+		{
+			newPod:       oneVolPod,
+			existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, twoVolPod},
+			filterName:   "csi",
+			maxVols:      3,
+			driverNames:  []string{"ebs"},
+			fits:         true,
+			test:         "count multiple pending pvcs once towards capacity",
+		},
+		// should count PVCs with invalid PV name but valid SC
+		{
+			newPod:       oneVolPod,
+			existingPods: []*v1.Pod{missingPVPod, twoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{"ebs"},
+			fits:         false,
+			test:         "should count PVCs with invalid PV name but valid SC",
+		},
+		// don't count a volume whose StorageClass is missing
+		{
+			newPod:       oneVolPod,
+			existingPods: []*v1.Pod{runningPod, noSCPVCPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{"ebs"},
+			fits:         true,
+			test:         "don't count pvcs with missing SC towards capacity",
+		},
+		// volumes of different drivers are counted separately
+		{
+			newPod:       oneVolPod,
+			existingPods: []*v1.Pod{gceTwoVolPod, twoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{"ebs", "gce"},
+			fits:         true,
+			test:         "don't count pvcs of a different driver towards capacity",
+		},
+		{
+			newPod:       gceTwoVolPod,
+			existingPods: []*v1.Pod{twoVolPod, runningPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{"ebs", "gce"},
+			fits:         true,
+			test:         "don't count existing pvcs of a different driver towards the new pod's capacity",
+		},
 	}
 
 	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
 	expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
 
 	// running attachable predicate tests with feature gate and limit present on nodes
 	for _, test := range tests {
-		node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
-		pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo("csi-ebs", "csi-ebs"), getFakeCSIPVCInfo("csi-ebs"))
+		node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.driverNames...)
+		pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo(test.filterName, test.driverNames...),
+			getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...),
+			getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0]))
+
 		fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
 		if err != nil {
 			t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
@@ -122,57 +265,63 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 	}
 }
 
-func getFakeCSIPVInfo(volumeName, driverName string) FakePersistentVolumeInfo {
-	return FakePersistentVolumeInfo{
-		{
-			ObjectMeta: metav1.ObjectMeta{Name: volumeName},
-			Spec: v1.PersistentVolumeSpec{
-				PersistentVolumeSource: v1.PersistentVolumeSource{
-					CSI: &v1.CSIPersistentVolumeSource{
-						Driver:       driverName,
-						VolumeHandle: volumeName,
-					},
-				},
-			},
-		},
-		{
-			ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-2"},
-			Spec: v1.PersistentVolumeSpec{
-				PersistentVolumeSource: v1.PersistentVolumeSource{
-					CSI: &v1.CSIPersistentVolumeSource{
-						Driver:       driverName,
-						VolumeHandle: volumeName + "-2",
-					},
-				},
-			},
-		},
-		{
-			ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-3"},
-			Spec: v1.PersistentVolumeSpec{
-				PersistentVolumeSource: v1.PersistentVolumeSource{
-					CSI: &v1.CSIPersistentVolumeSource{
-						Driver:       driverName,
-						VolumeHandle: volumeName + "-3",
-					},
-				},
-			},
-		},
-	}
+func getFakeCSIPVInfo(volumeName string, driverNames ...string) FakePersistentVolumeInfo {
+	pvInfos := FakePersistentVolumeInfo{}
+	for _, driver := range driverNames {
+		for j := 0; j < 4; j++ {
+			volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
+			pv := v1.PersistentVolume{
+				ObjectMeta: metav1.ObjectMeta{Name: volumeHandle},
+				Spec: v1.PersistentVolumeSpec{
+					PersistentVolumeSource: v1.PersistentVolumeSource{
+						CSI: &v1.CSIPersistentVolumeSource{
+							Driver:       driver,
+							VolumeHandle: volumeHandle,
+						},
+					},
+				},
+			}
+			pvInfos = append(pvInfos, pv)
+		}
+	}
+	return pvInfos
 }
 
-func getFakeCSIPVCInfo(volumeName string) FakePersistentVolumeClaimInfo {
-	return FakePersistentVolumeClaimInfo{
-		{
-			ObjectMeta: metav1.ObjectMeta{Name: volumeName},
-			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: volumeName},
-		},
-		{
-			ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-2"},
-			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: volumeName + "-2"},
-		},
+func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) FakePersistentVolumeClaimInfo {
+	pvcInfos := FakePersistentVolumeClaimInfo{}
+	for _, driver := range driverNames {
+		for j := 0; j < 4; j++ {
+			v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
+			pvc := v1.PersistentVolumeClaim{
+				ObjectMeta: metav1.ObjectMeta{Name: v},
+				Spec:       v1.PersistentVolumeClaimSpec{VolumeName: v},
+			}
+			pvcInfos = append(pvcInfos, pvc)
+		}
+	}
+
+	pvcInfos = append(pvcInfos, v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-4"},
+		Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &scName},
+	})
+	pvcInfos = append(pvcInfos, v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-5"},
+		Spec:       v1.PersistentVolumeClaimSpec{},
+	})
+	// a PVC with a missing PV but an available StorageClass.
+	pvcInfos = append(pvcInfos, v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-6"},
+		Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &scName, VolumeName: "missing-in-action"},
+	})
+	return pvcInfos
+}
+
+func getFakeCSIStorageClassInfo(scName, provisionerName string) FakeStorageClassInfo {
+	return FakeStorageClassInfo{
 		{
-			ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-3"},
-			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: volumeName + "-3"},
+			ObjectMeta:  metav1.ObjectMeta{Name: scName},
+			Provisioner: provisionerName,
 		},
 	}
 }
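For orientation when reading the table-driven tests, here is a standalone sketch (assumed names, not importing the test helpers) that prints the claim names the fakes above generate for volumeName "csi" and drivers "ebs" and "gce": the bound claims csi-&lt;driver&gt;-0..3 each point at a PV of the same name, while csi-4 (pending, StorageClass set), csi-5 (no StorageClass) and csi-6 (StorageClass set, PV missing) are the special cases exercised by the new test entries.

```go
package main

import "fmt"

// Reproduces the fake helpers' naming scheme so the pods' ClaimName values
// in the tests ("csi-ebs-0", "csi-gce-1", "csi-4", ...) are easy to place.
func main() {
	volumeName := "csi"
	for _, driver := range []string{"ebs", "gce"} {
		for j := 0; j < 4; j++ {
			fmt.Printf("%s-%s-%d\n", volumeName, driver, j)
		}
	}
	// The special unbound claims appended after the loop.
	fmt.Println(volumeName+"-4", volumeName+"-5", volumeName+"-6")
}
```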
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go
index 0d28befbdb16..33fa9935a3aa 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go
@@ -1047,16 +1047,17 @@ func TestMaxVolumeFuncM4(t *testing.T) {
 	}
 }
 
-func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, filter string) *schedulernodeinfo.NodeInfo {
+func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, driverNames ...string) *schedulernodeinfo.NodeInfo {
 	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
 	node := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
 		Status: v1.NodeStatus{
-			Allocatable: v1.ResourceList{
-				getVolumeLimitKey(filter): *resource.NewQuantity(limit, resource.DecimalSI),
-			},
+			Allocatable: v1.ResourceList{},
 		},
 	}
+	for _, driver := range driverNames {
+		node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
+	}
 	nodeInfo.SetNode(node)
 	return nodeInfo
 }
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_predicates.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_predicates.go
index ba3e5dcebef0..b1283261b8d6 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_predicates.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/register_predicates.go
@@ -81,7 +81,7 @@ func init() {
 	factory.RegisterFitPredicateFactory(
 		predicates.MaxCSIVolumeCountPred,
 		func(args factory.PluginFactoryArgs) predicates.FitPredicate {
-			return predicates.NewCSIMaxVolumeLimitPredicate(args.PVInfo, args.PVCInfo)
+			return predicates.NewCSIMaxVolumeLimitPredicate(args.PVInfo, args.PVCInfo, args.StorageClassInfo)
 		},
 	)
 	factory.RegisterFitPredicateFactory(
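Finally, a small sketch of what getNodeWithPodAndVolumeLimits now builds: one allocatable entry per driver, so one driver's volumes never count against another driver's limit. The key format below is an assumption standing in for volumeutil.GetCSIAttachLimitKey; only the one-entry-per-driver shape is the point.

```go
package main

import "fmt"

// getVolumeLimitKey is a stand-in for the helper the tests call; the exact
// string is illustrative. Each driver gets its own allocatable resource.
func getVolumeLimitKey(driver string) string {
	return "attachable-volumes-csi-" + driver
}

func main() {
	limit := int64(2)
	allocatable := map[string]int64{}
	for _, driver := range []string{"ebs", "gce"} {
		allocatable[getVolumeLimitKey(driver)] = limit
	}
	// Two independent limits: exhausting "ebs" capacity leaves "gce" untouched,
	// which is what the mixed-driver test cases above rely on.
	for key, value := range allocatable {
		fmt.Println(key, value)
	}
}
```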