diff --git a/pkg/capacityscheduling/capacity_scheduling.go b/pkg/capacityscheduling/capacity_scheduling.go
index c4e47beca..8c2c6ea26 100644
--- a/pkg/capacityscheduling/capacity_scheduling.go
+++ b/pkg/capacityscheduling/capacity_scheduling.go
@@ -118,7 +118,7 @@ func (c *CapacityScheduling) Name() string {
 }
 
 // New initializes a new plugin and returns it.
-func New(_ context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
 	c := &CapacityScheduling{
 		fh:                handle,
 		elasticQuotaInfos: NewElasticQuotaInfos(),
@@ -136,8 +136,8 @@ func New(_ context.Context, obj runtime.Object, handle framework.Handle) (framew
 	if err != nil {
 		return nil, err
 	}
-	// TODO: pass in context.
-	elasticQuotaInformer, err := dynamicCache.GetInformer(context.Background(), &v1alpha1.ElasticQuota{})
+
+	elasticQuotaInformer, err := dynamicCache.GetInformer(ctx, &v1alpha1.ElasticQuota{})
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/noderesourcetopology/cache/overreserve.go b/pkg/noderesourcetopology/cache/overreserve.go
index 37283fda0..7346a3128 100644
--- a/pkg/noderesourcetopology/cache/overreserve.go
+++ b/pkg/noderesourcetopology/cache/overreserve.go
@@ -55,7 +55,8 @@ type OverReserve struct {
 	isPodRelevant podprovider.PodFilterFunc
 }
 
-func NewOverReserve(lh logr.Logger, cfg *apiconfig.NodeResourceTopologyCache, client ctrlclient.Client, podLister podlisterv1.PodLister, isPodRelevant podprovider.PodFilterFunc) (*OverReserve, error) {
+func NewOverReserve(ctx context.Context, lh logr.Logger, cfg *apiconfig.NodeResourceTopologyCache, client ctrlclient.Client,
+	podLister podlisterv1.PodLister, isPodRelevant podprovider.PodFilterFunc) (*OverReserve, error) {
 	if client == nil || podLister == nil {
 		return nil, fmt.Errorf("received nil references")
 	}
@@ -64,7 +65,7 @@ func NewOverReserve(lh logr.Logger, cfg *apiconfig.NodeResourceTopologyCache, cl
 
 	nrtObjs := &topologyv1alpha2.NodeResourceTopologyList{}
 	// TODO: we should pass-in a context in the future
-	if err := client.List(context.Background(), nrtObjs); err != nil {
+	if err := client.List(ctx, nrtObjs); err != nil {
 		return nil, err
 	}
 
diff --git a/pkg/noderesourcetopology/cache/overreserve_test.go b/pkg/noderesourcetopology/cache/overreserve_test.go
index 55ffa2348..af4faadbe 100644
--- a/pkg/noderesourcetopology/cache/overreserve_test.go
+++ b/pkg/noderesourcetopology/cache/overreserve_test.go
@@ -104,13 +104,13 @@ func TestInitEmptyLister(t *testing.T) {
 	}
 
 	fakePodLister := &fakePodLister{}
-
-	_, err = NewOverReserve(klog.Background(), nil, nil, fakePodLister, podprovider.IsPodRelevantAlways)
+	ctx := context.Background()
+	_, err = NewOverReserve(ctx, klog.Background(), nil, nil, fakePodLister, podprovider.IsPodRelevantAlways)
 	if err == nil {
 		t.Fatalf("accepted nil lister")
 	}
 
-	_, err = NewOverReserve(klog.Background(), nil, fakeClient, nil, podprovider.IsPodRelevantAlways)
+	_, err = NewOverReserve(ctx, klog.Background(), nil, fakeClient, nil, podprovider.IsPodRelevantAlways)
 	if err == nil {
 		t.Fatalf("accepted nil indexer")
 	}
@@ -229,7 +229,7 @@ func TestOverreserveGetCachedNRTCopy(t *testing.T) {
 	checkGetCachedNRTCopy(
 		t,
 		func(client ctrlclient.Client, podLister podlisterv1.PodLister) (Interface, error) {
-			return NewOverReserve(klog.Background(), nil, client, podLister, podprovider.IsPodRelevantAlways)
+			return NewOverReserve(context.Background(), klog.Background(), nil, client, podLister, podprovider.IsPodRelevantAlways)
 		},
 		testCases...,
 	)
@@ -727,7 +727,7 @@ func TestNodeWithForeignPods(t *testing.T) {
 }
 
 func mustOverReserve(t *testing.T, client ctrlclient.Client, podLister podlisterv1.PodLister) *OverReserve {
-	obj, err := NewOverReserve(klog.Background(), nil, client, podLister, podprovider.IsPodRelevantAlways)
+	obj, err := NewOverReserve(context.Background(), klog.Background(), nil, client, podLister, podprovider.IsPodRelevantAlways)
 	if err != nil {
 		t.Fatalf("unexpected error creating cache: %v", err)
 	}
diff --git a/pkg/noderesourcetopology/plugin.go b/pkg/noderesourcetopology/plugin.go
index 7ad6619d0..1f311c6f6 100644
--- a/pkg/noderesourcetopology/plugin.go
+++ b/pkg/noderesourcetopology/plugin.go
@@ -119,7 +119,7 @@ func (tm *TopologyMatch) Name() string {
 }
 
 // New initializes a new plugin and returns it.
-func New(_ context.Context, args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func New(ctx context.Context, args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
 	// we do this later to make sure klog is initialized. We don't need this anyway before this point
 	lh := klog.Background()
 	logging.SetLogger(lh)
@@ -134,7 +134,7 @@ func New(_ context.Context, args runtime.Object, handle framework.Handle) (frame
 		return nil, err
 	}
-	nrtCache, err := initNodeTopologyInformer(lh, tcfg, handle)
+	nrtCache, err := initNodeTopologyInformer(ctx, lh, tcfg, handle)
 	if err != nil {
 		lh.Error(err, "cannot create clientset for NodeTopologyResource", "kubeConfig", handle.KubeConfig())
 		return nil, err
 	}
diff --git a/pkg/noderesourcetopology/pluginhelpers.go b/pkg/noderesourcetopology/pluginhelpers.go
index 54b95e390..78c8bbad4 100644
--- a/pkg/noderesourcetopology/pluginhelpers.go
+++ b/pkg/noderesourcetopology/pluginhelpers.go
@@ -17,6 +17,7 @@ limitations under the License.
 package noderesourcetopology
 
 import (
+	"context"
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
@@ -40,7 +41,8 @@ const (
 	maxNUMAId = 64
 )
 
-func initNodeTopologyInformer(lh logr.Logger, tcfg *apiconfig.NodeResourceTopologyMatchArgs, handle framework.Handle) (nrtcache.Interface, error) {
+func initNodeTopologyInformer(ctx context.Context, lh logr.Logger,
+	tcfg *apiconfig.NodeResourceTopologyMatchArgs, handle framework.Handle) (nrtcache.Interface, error) {
 	client, err := ctrlclient.New(handle.KubeConfig(), ctrlclient.Options{Scheme: scheme})
 	if err != nil {
 		lh.Error(err, "cannot create client for NodeTopologyResource", "kubeConfig", handle.KubeConfig())
 		return nil, err
 	}
@@ -57,7 +59,7 @@ func initNodeTopologyInformer(lh logr.Logger, tcfg *apiconfig.NodeResourceTopolo
 
 	podSharedInformer, podLister, isPodRelevant := podprovider.NewFromHandle(lh, handle, tcfg.Cache)
 
-	nrtCache, err := nrtcache.NewOverReserve(lh.WithName("nrtcache"), tcfg.Cache, client, podLister, isPodRelevant)
+	nrtCache, err := nrtcache.NewOverReserve(ctx, lh.WithName("nrtcache"), tcfg.Cache, client, podLister, isPodRelevant)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/sysched/sysched.go b/pkg/sysched/sysched.go
index 4823cdc5a..3c8289cc1 100644
--- a/pkg/sysched/sysched.go
+++ b/pkg/sysched/sysched.go
@@ -230,8 +230,8 @@ func (sc *SySched) calcScore(syscalls sets.Set[string]) int {
 // Score invoked at the score extension point.
 func (sc *SySched) Score(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
 	// Read directly from API server because cached state in SnapSharedLister not always up-to-date
-	// especially during intial scheduler start.
-	node, err := sc.handle.ClientSet().CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+	// especially during initial scheduler start.
+	node, err := sc.handle.ClientSet().CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 	if err != nil {
 		return 0, nil
 	}