implement TAS support in AppWrapper integration #4174

Merged — 4 commits, merged Feb 13, 2025
Changes from all commits
1 change: 1 addition & 0 deletions Makefile-test.mk
@@ -153,6 +153,7 @@ run-test-tas-e2e-%: FORCE
    E2E_KIND_VERSION="kindest/node:v$(K8S_VERSION)" KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) CREATE_KIND_CLUSTER=$(CREATE_KIND_CLUSTER) \
    ARTIFACTS="$(ARTIFACTS)/$@" IMAGE_TAG=$(IMAGE_TAG) GINKGO_ARGS="$(GINKGO_ARGS)" \
    JOBSET_VERSION=$(JOBSET_VERSION) KUBEFLOW_VERSION=$(KUBEFLOW_VERSION) KUBEFLOW_MPI_VERSION=$(KUBEFLOW_MPI_VERSION) \
    APPWRAPPER_VERSION=$(APPWRAPPER_VERSION) \
    KIND_CLUSTER_FILE="tas-kind-cluster.yaml" E2E_TARGET_FOLDER="tas" \
    ./hack/e2e-test.sh
    $(PROJECT_DIR)/bin/ginkgo-top -i $(ARTIFACTS)/$@/e2e.json > $(ARTIFACTS)/$@/e2e-top.yaml
3 changes: 3 additions & 0 deletions pkg/controller/jobs/appwrapper/appwrapper_controller.go
@@ -122,6 +122,9 @@ func (aw *AppWrapper) PodSets() ([]kueue.PodSet, error) {
        ctrl.Log.Error(err, "Error returned from awutils.GetPodSets", "appwrapper", aw)
        return nil, err
    }
    for idx := range podSets {
        podSets[idx].TopologyRequest = jobframework.PodSetTopologyRequest(&podSets[idx].Template.ObjectMeta, nil, nil, nil)

Contributor commented:
Right, there is no rank-based ordering for now.

IIUC this requires checking the type of the wrapped job and preparing configuration depending on this.
We could probably register some "rank-based ordering" factory by framework, and you could do the lookup here to construct the input.

However, this is substantially more work, so I suggest it as a follow-up.

    }
    return podSets, nil
}

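The follow-up the contributor sketches above — registering a per-framework "rank-based ordering" factory that the AppWrapper integration could consult instead of passing three nils to jobframework.PodSetTopologyRequest — might look roughly like the sketch below. This is a hypothetical illustration only, not code from this PR or from Kueue: the RankOrdering type, the registry, and every function name are invented, and the meaning of the three nil arguments is assumed.

// rank_ordering_sketch.go — hypothetical, not part of this PR.
package jobframework

import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// RankOrdering captures the labels a framework uses to order pods by rank
// (assumed to correspond to the nil arguments passed above).
type RankOrdering struct {
    PodIndexLabel      *string
    SubGroupIndexLabel *string
    SubGroupCount      *int32
}

// rankOrderingFactories maps a wrapped component's GVK to a factory that
// derives that framework's rank ordering from the concrete object.
var rankOrderingFactories = map[schema.GroupVersionKind]func(runtime.Object) RankOrdering{}

// RegisterRankOrdering would be called from each framework integration's init().
func RegisterRankOrdering(gvk schema.GroupVersionKind, f func(runtime.Object) RankOrdering) {
    rankOrderingFactories[gvk] = f
}

// RankOrderingFor is what AppWrapper.PodSets() could call per wrapped component,
// using the result to populate the TopologyRequest instead of nil, nil, nil.
func RankOrderingFor(gvk schema.GroupVersionKind, obj runtime.Object) (RankOrdering, bool) {
    f, ok := rankOrderingFactories[gvk]
    if !ok {
        return RankOrdering{}, false
    }
    return f(obj), true
}

With something like this, each integration could register the label it already uses for pod ranks (for example, the completion-index label for batch Jobs), and the loop above would do a GVK lookup per component rather than hard-coding nils — which matches the reviewer's point that this is more work and better left to a follow-up.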
2 changes: 1 addition & 1 deletion site/content/en/docs/overview/_index.md
@@ -53,7 +53,7 @@ A core design principle for Kueue is to avoid duplicating mature functionality i
| [LendingLimit](/docs/concepts/cluster_queue/#lendinglimit) | + | + | + | + | + | + | + | + | + | + | + |
| [All-or-nothing with ready Pods](/docs/concepts/workload/#all-or-nothing-semantics-for-job-resource-assignment) | + | + | + | + | + | + | + | + | + | + | + |
| [Fair Sharing](/docs/concepts/preemption/#fair-sharing) | + | + | + | + | + | + | + | + | + | + | + |
| [Topology Aware Scheduling](/docs/concepts/topology_aware_scheduling) | + | + | + | + | + | + | + | + | + | + | |
| [Topology Aware Scheduling](/docs/concepts/topology_aware_scheduling) | + | + | + | + | + | + | + | + | + | + | + |

## High-level Kueue operation

132 changes: 132 additions & 0 deletions test/e2e/tas/appwrapper_test.go
@@ -0,0 +1,132 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tase2e

import (
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "sigs.k8s.io/controller-runtime/pkg/client"

    kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1"
    kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
    "sigs.k8s.io/kueue/pkg/util/testing"
    awtesting "sigs.k8s.io/kueue/pkg/util/testingjobs/appwrapper"
    utiltestingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
    "sigs.k8s.io/kueue/test/util"
)

var _ = ginkgo.Describe("TopologyAwareScheduling for AppWrapper", func() {
    var (
        ns           *corev1.Namespace
        topology     *kueuealpha.Topology
        tasFlavor    *kueue.ResourceFlavor
        clusterQueue *kueue.ClusterQueue
        localQueue   *kueue.LocalQueue
    )

    ginkgo.BeforeEach(func() {
        ns = &corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{
                GenerateName: "e2e-tas-aw-",
            },
        }
        gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())

        topology = testing.MakeDefaultThreeLevelTopology("datacenter")
        gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed())

        tasFlavor = testing.MakeResourceFlavor("tas-flavor").
            NodeLabel(tasNodeGroupLabel, instanceType).
            TopologyName(topology.Name).
            Obj()
        gomega.Expect(k8sClient.Create(ctx, tasFlavor)).Should(gomega.Succeed())

        clusterQueue = testing.MakeClusterQueue("cluster-queue").
            ResourceGroup(
                *testing.MakeFlavorQuotas(tasFlavor.Name).
                    Resource(corev1.ResourceCPU, "1").
                    Resource(extraResource, "8").
                    Obj(),
            ).
            Obj()
        gomega.Expect(k8sClient.Create(ctx, clusterQueue)).Should(gomega.Succeed())
        util.ExpectClusterQueuesToBeActive(ctx, k8sClient, clusterQueue)

        localQueue = testing.MakeLocalQueue("local-queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
        gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed())
        util.ExpectLocalQueuesToBeActive(ctx, k8sClient, localQueue)
    })
    ginkgo.AfterEach(func() {
        gomega.Expect(util.DeleteAllAppWrappersInNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
        gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
        util.ExpectObjectToBeDeleted(ctx, k8sClient, clusterQueue, true)
        util.ExpectObjectToBeDeleted(ctx, k8sClient, tasFlavor, true)
        util.ExpectObjectToBeDeleted(ctx, k8sClient, topology, true)
        util.ExpectAllPodsInNamespaceDeleted(ctx, k8sClient, ns)
    })

    ginkgo.When("Creating an AppWrapper", func() {
        ginkgo.It("Should place pods", func() {
            numPods := 4

            aw := awtesting.MakeAppWrapper("appwrapper", ns.Name).
                Component(utiltestingjob.MakeJob("job-0", ns.Name).
                    Parallelism(int32(numPods)).
                    Completions(int32(numPods)).
                    Request(corev1.ResourceCPU, "100m").
                    Limit(corev1.ResourceCPU, "100m").
                    Request(extraResource, "1").
                    Limit(extraResource, "1").
                    Suspend(false).
                    Image(util.E2eTestSleepImage, []string{"60s"}).
                    PodAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, testing.DefaultRackTopologyLevel).
                    SetTypeMeta().Obj()).
                Queue(localQueue.Name).
                Obj()

            gomega.Expect(k8sClient.Create(ctx, aw)).Should(gomega.Succeed())

            ginkgo.By("AppWrapper is unsuspended", func() {
                gomega.Eventually(func(g gomega.Gomega) {
                    g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(aw), aw)).To(gomega.Succeed())
                    g.Expect(aw.Spec.Suspend).Should(gomega.BeFalse())
                }, util.Timeout, util.Interval).Should(gomega.Succeed())
            })

            pods := &corev1.PodList{}
            ginkgo.By("ensure all pods are created", func() {
                gomega.Eventually(func(g gomega.Gomega) {
                    g.Expect(k8sClient.List(ctx, pods, client.InNamespace(ns.Name))).To(gomega.Succeed())
                    g.Expect(pods.Items).Should(gomega.HaveLen(numPods))
                }, util.LongTimeout, util.Interval).Should(gomega.Succeed())
            })

            ginkgo.By("ensure all pods are scheduled", func() {
                listOpts := &client.ListOptions{
                    FieldSelector: fields.OneTermNotEqualSelector("spec.nodeName", ""),
                }
                gomega.Eventually(func(g gomega.Gomega) {
                    g.Expect(k8sClient.List(ctx, pods, client.InNamespace(ns.Name), listOpts)).To(gomega.Succeed())
                    g.Expect(pods.Items).Should(gomega.HaveLen(numPods))
                }, util.LongTimeout, util.Interval).Should(gomega.Succeed())
            })
        })
    })
})
1 change: 1 addition & 0 deletions test/e2e/tas/suite_test.go
@@ -61,6 +61,7 @@ var _ = ginkgo.BeforeSuite(func() {
    util.WaitForJobSetAvailability(ctx, k8sClient)
    util.WaitForKubeFlowTrainingOperatorAvailability(ctx, k8sClient)
    util.WaitForKubeFlowMPIOperatorAvailability(ctx, k8sClient)
    util.WaitForAppWrapperAvailability(ctx, k8sClient)
    ginkgo.GinkgoLogr.Info(
        "Kueue, JobSet, KubeFlow Training and KubeFlow MPI operators are available in the cluster",
        "waitingTime", time.Since(waitForAvailableStart),
@@ -20,25 +20,31 @@ import (
"fmt"
"time"

"github.com/google/go-cmp/cmp/cmpopts"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
awv1beta2 "github.com/project-codeflare/appwrapper/api/v1beta2"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/clock"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

configapi "sigs.k8s.io/kueue/apis/config/v1beta1"
kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1"
kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
"sigs.k8s.io/kueue/pkg/controller/constants"
"sigs.k8s.io/kueue/pkg/controller/jobframework"
workloadaw "sigs.k8s.io/kueue/pkg/controller/jobs/appwrapper"
"sigs.k8s.io/kueue/pkg/features"
"sigs.k8s.io/kueue/pkg/util/testing"
testingaw "sigs.k8s.io/kueue/pkg/util/testingjobs/appwrapper"
utiltestingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
testingnode "sigs.k8s.io/kueue/pkg/util/testingjobs/node"
"sigs.k8s.io/kueue/pkg/workload"
"sigs.k8s.io/kueue/test/util"
)
@@ -833,3 +839,130 @@ var _ = ginkgo.Describe("AppWrapper controller interacting with scheduler", gink
        util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 1)
    })
})

var _ = ginkgo.Describe("AppWrapper controller when TopologyAwareScheduling enabled", ginkgo.Ordered, ginkgo.ContinueOnFailure, func() {
const (
nodeGroupLabel = "node-group"
tasBlockLabel = "cloud.com/topology-block"
)

var (
ns *corev1.Namespace
nodes []corev1.Node
topology *kueuealpha.Topology
tasFlavor *kueue.ResourceFlavor
clusterQueue *kueue.ClusterQueue
localQueue *kueue.LocalQueue
)

ginkgo.BeforeAll(func() {
fwk.StartManager(ctx, cfg, managerAndSchedulerSetup(true))
})

ginkgo.AfterAll(func() {
fwk.StopManager(ctx)
})

ginkgo.BeforeEach(func() {
features.SetFeatureGateDuringTest(ginkgo.GinkgoTB(), features.TopologyAwareScheduling, true)

ns = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "tas-aw-",
},
}
gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())

nodes = []corev1.Node{
*testingnode.MakeNode("b1").
Label("node-group", "tas").
Label(tasBlockLabel, "b1").
StatusAllocatable(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
}).
Ready().
Obj(),
}
util.CreateNodes(ctx, k8sClient, nodes)

topology = testing.MakeTopology("default").Levels(tasBlockLabel).Obj()
gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed())

tasFlavor = testing.MakeResourceFlavor("tas-flavor").
NodeLabel(nodeGroupLabel, "tas").
TopologyName("default").Obj()
gomega.Expect(k8sClient.Create(ctx, tasFlavor)).Should(gomega.Succeed())

clusterQueue = testing.MakeClusterQueue("cluster-queue").
ResourceGroup(*testing.MakeFlavorQuotas(tasFlavor.Name).Resource(corev1.ResourceCPU, "5").Obj()).
Obj()
gomega.Expect(k8sClient.Create(ctx, clusterQueue)).Should(gomega.Succeed())
util.ExpectClusterQueuesToBeActive(ctx, k8sClient, clusterQueue)

localQueue = testing.MakeLocalQueue("local-queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed())
})

ginkgo.AfterEach(func() {
gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
util.ExpectObjectToBeDeleted(ctx, k8sClient, clusterQueue, true)
util.ExpectObjectToBeDeleted(ctx, k8sClient, tasFlavor, true)
util.ExpectObjectToBeDeleted(ctx, k8sClient, topology, true)
for _, node := range nodes {
util.ExpectObjectToBeDeleted(ctx, k8sClient, &node, true)
}
})

ginkgo.It("should admit workload which fits in a required topology domain", func() {
aw := testingaw.MakeAppWrapper(awName, ns.Name).
Component(utiltestingjob.MakeJob("job", ns.Name).
PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, tasBlockLabel).
Request(corev1.ResourceCPU, "1").
SetTypeMeta().
Obj()).
Queue(localQueue.Name).
Suspend(false).
Obj()
ginkgo.By("creating a job which requires block", func() {
gomega.Expect(k8sClient.Create(ctx, aw)).Should(gomega.Succeed())
})

wl := &kueue.Workload{}
wlLookupKey := types.NamespacedName{Name: workloadaw.GetWorkloadNameForAppWrapper(aw.Name, aw.UID), Namespace: ns.Name}

ginkgo.By("verify the workload is created", func() {
gomega.Eventually(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, wlLookupKey, wl)).Should(gomega.Succeed())
g.Expect(wl.Spec.PodSets).Should(gomega.BeComparableTo([]kueue.PodSet{{
Name: wl.Spec.PodSets[0].Name,
Count: 1,
TopologyRequest: &kueue.PodSetTopologyRequest{
Required: ptr.To(tasBlockLabel),
},
}}, cmpopts.IgnoreFields(kueue.PodSet{}, "Template")))
}, util.Timeout, util.Interval).Should(gomega.Succeed())
})

ginkgo.By("verify the workload is admitted", func() {
util.ExpectWorkloadsToBeAdmitted(ctx, k8sClient, wl)
util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 1)
})

ginkgo.By("verify admission for the workload", func() {
wl := &kueue.Workload{}
wlLookupKey := types.NamespacedName{Name: workloadaw.GetWorkloadNameForAppWrapper(aw.Name, aw.UID), Namespace: ns.Name}
gomega.Eventually(func(g gomega.Gomega) {
g.Expect(k8sClient.Get(ctx, wlLookupKey, wl)).Should(gomega.Succeed())
g.Expect(wl.Status.Admission).ShouldNot(gomega.BeNil())
g.Expect(wl.Status.Admission.PodSetAssignments).Should(gomega.HaveLen(1))
g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo(
&kueue.TopologyAssignment{
Levels: []string{tasBlockLabel},
Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1"}}},
},
))
}, util.Timeout, util.Interval).Should(gomega.Succeed())
})
})
})
9 changes: 9 additions & 0 deletions test/util/util.go
@@ -32,6 +32,7 @@ import (
    kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1"
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    awv1beta2 "github.com/project-codeflare/appwrapper/api/v1beta2"
    "github.com/prometheus/client_golang/prometheus"
    zaplog "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
@@ -176,6 +177,14 @@ func DeleteAllPyTorchJobsInNamespace(ctx context.Context, c client.Client, ns *c
    return nil
}

func DeleteAllAppWrappersInNamespace(ctx context.Context, c client.Client, ns *corev1.Namespace) error {
    err := c.DeleteAllOf(ctx, &awv1beta2.AppWrapper{}, client.InNamespace(ns.Name), client.PropagationPolicy(metav1.DeletePropagationBackground))
    if err != nil && !apierrors.IsNotFound(err) && !errors.Is(err, &apimeta.NoKindMatchError{}) {
        return err
    }
    return nil
}

func DeleteAllPodsInNamespace(ctx context.Context, c client.Client, ns *corev1.Namespace) error {
    return deleteAllPodsInNamespace(ctx, c, ns, 2)
}