opt vgpu kubeclient use #2911

Closed
pkg/scheduler/api/devices/nvidia/vgpu/device_info.go (2 changes: 1 addition & 1 deletion)
@@ -216,7 +216,7 @@ func (gs *GPUDevices) Allocate(kubeClient kubernetes.Interface, pod *v1.Pod) err
 
     annotations[DeviceBindPhase] = "allocating"
    annotations[BindTimeAnnotations] = strconv.FormatInt(time.Now().Unix(), 10)
-    err = patchPodAnnotations(pod, annotations)
+    err = patchPodAnnotations(kubeClient, pod, annotations)
     if err != nil {
         return err
     }
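For reference, a minimal standalone sketch of the injection pattern this change adopts in Allocate and patchPodAnnotations: the Kubernetes client is passed as a parameter rather than read from a package-level variable, so the caller (here a fake clientset, so no API server is needed) decides which client performs the patch. The annotatePod helper and the annotation key below are hypothetical, not code from this PR.

package main

import (
    "context"
    "encoding/json"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    k8stypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/fake"
)

// annotatePod is a hypothetical helper that mirrors the new patchPodAnnotations
// signature: the client is an explicit argument instead of a package-level variable.
func annotatePod(client kubernetes.Interface, pod *v1.Pod, annotations map[string]string) error {
    body, err := json.Marshal(map[string]interface{}{
        "metadata": map[string]interface{}{"annotations": annotations},
    })
    if err != nil {
        return err
    }
    _, err = client.CoreV1().Pods(pod.Namespace).Patch(
        context.Background(), pod.Name, k8stypes.StrategicMergePatchType, body, metav1.PatchOptions{})
    return err
}

func main() {
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
    client := fake.NewSimpleClientset(pod) // fake clientset: no API server required

    if err := annotatePod(client, pod, map[string]string{"example/phase": "allocating"}); err != nil {
        fmt.Println("patch failed:", err)
        return
    }
    updated, _ := client.CoreV1().Pods("default").Get(context.Background(), "demo", metav1.GetOptions{})
    fmt.Println(updated.Annotations) // map[example/phase:allocating]
}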
pkg/scheduler/api/devices/nvidia/vgpu/utils.go (22 changes: 8 additions & 14 deletions)
@@ -34,6 +34,7 @@ import (
     "k8s.io/klog/v2"
 )
 
+// TODO: can we use the same client as the scheduler?
 var kubeClient kubernetes.Interface
 
 func init() {
@@ -64,6 +65,12 @@ func NewClient() (kubernetes.Interface, error) {
     return client, err
 }
 
+// UseClient uses an existing client
+func UseClient(client kubernetes.Interface) error {
+    kubeClient = client
+    return nil
+}
+
 func patchNodeAnnotations(node *v1.Node, annotations map[string]string) error {
     type patchMetadata struct {
         Annotations map[string]string `json:"annotations,omitempty"`
@@ -376,8 +383,6 @@ func checkNodeGPUSharingPredicate(pod *v1.Pod, gssnap *GPUDevices, replicate boo
             klog.Errorln("failed checktype", gs.Device[i].Type, val.Type)
             continue
         }
-        //total += gs.Devices[i].Count
-        //free += node.Devices[i].Count - node.Devices[i].Used
         if val.Nums > 0 {
             klog.V(3).Infoln("device", gs.Device[i].ID, "fitted")
             val.Nums--
@@ -403,13 +408,12 @@ func checkNodeGPUSharingPredicate(pod *v1.Pod, gssnap *GPUDevices, replicate boo
     return true, ctrdevs, nil
 }
 
-func patchPodAnnotations(pod *v1.Pod, annotations map[string]string) error {
+func patchPodAnnotations(kubeClient kubernetes.Interface, pod *v1.Pod, annotations map[string]string) error {
     type patchMetadata struct {
         Annotations map[string]string `json:"annotations,omitempty"`
     }
     type patchPod struct {
         Metadata patchMetadata `json:"metadata"`
-        //Spec patchSpec `json:"spec,omitempty"`
     }
 
     p := patchPod{}
@@ -424,15 +428,5 @@ func patchPodAnnotations(pod *v1.Pod, annotations map[string]string) error {
     if err != nil {
         klog.Errorf("patch pod %v failed, %v", pod.Name, err)
     }
-    /*
-        Can't modify Env of pods here
-
-        patch1 := addGPUIndexPatch()
-        _, err = s.kubeClient.CoreV1().Pods(pod.Namespace).
-            Patch(context.Background(), pod.Name, k8stypes.JSONPatchType, []byte(patch1), metav1.PatchOptions{})
-        if err != nil {
-            klog.Infof("Patch1 pod %v failed, %v", pod.Name, err)
-        }*/
-
     return err
 }
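For reference, a minimal wiring sketch for the new UseClient hook, assuming an in-cluster rest.Config and the volcano.sh/volcano/pkg/scheduler/api/devices/nvidia/vgpu import path: the caller hands its existing clientset to the vgpu package instead of letting the package build a second one through NewClient.

package main

import (
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/klog/v2"

    "volcano.sh/volcano/pkg/scheduler/api/devices/nvidia/vgpu"
)

func main() {
    // Assumption: running in-cluster; a kubeconfig-based rest.Config works the same way.
    cfg, err := rest.InClusterConfig()
    if err != nil {
        klog.Fatalf("load rest config: %v", err)
    }
    client, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        klog.Fatalf("build clientset: %v", err)
    }
    // Reuse this clientset inside the vgpu device code instead of creating a second one.
    if err := vgpu.UseClient(client); err != nil {
        klog.Fatalf("set vgpu client: %v", err)
    }
}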