Skip to content

Commit

Permalink
pass the existing context instead of calling context.TODO()/context.Background()
Browse files Browse the repository at this point in the history
  • Loading branch information
googs1025 committed May 6, 2024
1 parent 51d27b6 commit f3cd486
Show file tree
Hide file tree
Showing 6 changed files with 19 additions and 16 deletions.
6 changes: 3 additions & 3 deletions pkg/capacityscheduling/capacity_scheduling.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ func (c *CapacityScheduling) Name() string {
}

// New initializes a new plugin and returns it.
func New(_ context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) {
c := &CapacityScheduling{
fh: handle,
elasticQuotaInfos: NewElasticQuotaInfos(),
Expand All @@ -136,8 +136,8 @@ func New(_ context.Context, obj runtime.Object, handle framework.Handle) (framew
if err != nil {
return nil, err
}
// TODO: pass in context.
elasticQuotaInformer, err := dynamicCache.GetInformer(context.Background(), &v1alpha1.ElasticQuota{})

elasticQuotaInformer, err := dynamicCache.GetInformer(ctx, &v1alpha1.ElasticQuota{})
if err != nil {
return nil, err
}
Expand Down
5 changes: 3 additions & 2 deletions pkg/noderesourcetopology/cache/overreserve.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,8 @@ type OverReserve struct {
isPodRelevant podprovider.PodFilterFunc
}

func NewOverReserve(lh logr.Logger, cfg *apiconfig.NodeResourceTopologyCache, client ctrlclient.Client, podLister podlisterv1.PodLister, isPodRelevant podprovider.PodFilterFunc) (*OverReserve, error) {
func NewOverReserve(ctx context.Context, lh logr.Logger, cfg *apiconfig.NodeResourceTopologyCache, client ctrlclient.Client,
podLister podlisterv1.PodLister, isPodRelevant podprovider.PodFilterFunc) (*OverReserve, error) {
if client == nil || podLister == nil {
return nil, fmt.Errorf("received nil references")
}
Expand All @@ -64,7 +65,7 @@ func NewOverReserve(lh logr.Logger, cfg *apiconfig.NodeResourceTopologyCache, cl

nrtObjs := &topologyv1alpha2.NodeResourceTopologyList{}
// TODO: we should pass-in a context in the future
if err := client.List(context.Background(), nrtObjs); err != nil {
if err := client.List(ctx, nrtObjs); err != nil {
return nil, err
}

Expand Down
10 changes: 5 additions & 5 deletions pkg/noderesourcetopology/cache/overreserve_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,13 +104,13 @@ func TestInitEmptyLister(t *testing.T) {
}

fakePodLister := &fakePodLister{}

_, err = NewOverReserve(klog.Background(), nil, nil, fakePodLister, podprovider.IsPodRelevantAlways)
ctx := context.Background()
_, err = NewOverReserve(ctx, klog.Background(), nil, nil, fakePodLister, podprovider.IsPodRelevantAlways)
if err == nil {
t.Fatalf("accepted nil lister")
}

_, err = NewOverReserve(klog.Background(), nil, fakeClient, nil, podprovider.IsPodRelevantAlways)
_, err = NewOverReserve(ctx, klog.Background(), nil, fakeClient, nil, podprovider.IsPodRelevantAlways)
if err == nil {
t.Fatalf("accepted nil indexer")
}
Expand Down Expand Up @@ -229,7 +229,7 @@ func TestOverreserveGetCachedNRTCopy(t *testing.T) {
checkGetCachedNRTCopy(
t,
func(client ctrlclient.Client, podLister podlisterv1.PodLister) (Interface, error) {
return NewOverReserve(klog.Background(), nil, client, podLister, podprovider.IsPodRelevantAlways)
return NewOverReserve(context.Background(), klog.Background(), nil, client, podLister, podprovider.IsPodRelevantAlways)
},
testCases...,
)
Expand Down Expand Up @@ -727,7 +727,7 @@ func TestNodeWithForeignPods(t *testing.T) {
}

func mustOverReserve(t *testing.T, client ctrlclient.Client, podLister podlisterv1.PodLister) *OverReserve {
obj, err := NewOverReserve(klog.Background(), nil, client, podLister, podprovider.IsPodRelevantAlways)
obj, err := NewOverReserve(context.Background(), klog.Background(), nil, client, podLister, podprovider.IsPodRelevantAlways)
if err != nil {
t.Fatalf("unexpected error creating cache: %v", err)
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/noderesourcetopology/plugin.go
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ func (tm *TopologyMatch) Name() string {
}

// New initializes a new plugin and returns it.
func New(_ context.Context, args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
// we do this later to make sure klog is initialized. We don't need this anyway before this point
lh := klog.Background()
logging.SetLogger(lh)
Expand All @@ -134,7 +134,7 @@ func New(_ context.Context, args runtime.Object, handle framework.Handle) (frame
return nil, err
}

nrtCache, err := initNodeTopologyInformer(lh, tcfg, handle)
nrtCache, err := initNodeTopologyInformer(ctx, lh, tcfg, handle)
if err != nil {
lh.Error(err, "cannot create clientset for NodeTopologyResource", "kubeConfig", handle.KubeConfig())
return nil, err
Expand Down
6 changes: 4 additions & 2 deletions pkg/noderesourcetopology/pluginhelpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ limitations under the License.
package noderesourcetopology

import (
"context"
"time"

corev1 "k8s.io/api/core/v1"
Expand All @@ -40,7 +41,8 @@ const (
maxNUMAId = 64
)

func initNodeTopologyInformer(lh logr.Logger, tcfg *apiconfig.NodeResourceTopologyMatchArgs, handle framework.Handle) (nrtcache.Interface, error) {
func initNodeTopologyInformer(ctx context.Context, lh logr.Logger,
tcfg *apiconfig.NodeResourceTopologyMatchArgs, handle framework.Handle) (nrtcache.Interface, error) {
client, err := ctrlclient.New(handle.KubeConfig(), ctrlclient.Options{Scheme: scheme})
if err != nil {
lh.Error(err, "cannot create client for NodeTopologyResource", "kubeConfig", handle.KubeConfig())
Expand All @@ -57,7 +59,7 @@ func initNodeTopologyInformer(lh logr.Logger, tcfg *apiconfig.NodeResourceTopolo

podSharedInformer, podLister, isPodRelevant := podprovider.NewFromHandle(lh, handle, tcfg.Cache)

nrtCache, err := nrtcache.NewOverReserve(lh.WithName("nrtcache"), tcfg.Cache, client, podLister, isPodRelevant)
nrtCache, err := nrtcache.NewOverReserve(ctx, lh.WithName("nrtcache"), tcfg.Cache, client, podLister, isPodRelevant)
if err != nil {
return nil, err
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/sysched/sysched.go
Original file line number Diff line number Diff line change
Expand Up @@ -230,8 +230,8 @@ func (sc *SySched) calcScore(syscalls sets.Set[string]) int {
// Score invoked at the score extension point.
func (sc *SySched) Score(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
// Read directly from API server because cached state in SnapSharedLister not always up-to-date
// especially during intial scheduler start.
node, err := sc.handle.ClientSet().CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
// especially during initial scheduler start.
node, err := sc.handle.ClientSet().CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return 0, nil
}
Expand Down

0 comments on commit f3cd486

Please sign in to comment.