driver: use nomad 1.7 cgroup paths
shoenig committed Nov 7, 2023
1 parent 91a2af9 commit 2a33591
Showing 2 changed files with 16 additions and 9 deletions.
8 changes: 4 additions & 4 deletions hack/resources.hcl
@@ -7,7 +7,7 @@ job "resources" {
     driver = "pledge"
     config {
       command = "/bin/cat"
-      args = ["/sys/fs/cgroup/nomad.slice/${NOMAD_ALLOC_ID}.${NOMAD_TASK_NAME}.scope/memory.max"]
+      args = ["/sys/fs/cgroup/nomad.slice/share.slice/${NOMAD_ALLOC_ID}.${NOMAD_TASK_NAME}.scope/memory.max"]
       promises = "stdio rpath"
       unveil = ["r:/sys/fs/cgroup/nomad.slice"]
     }
@@ -21,7 +21,7 @@ job "resources" {
     driver = "pledge"
     config {
       command = "/bin/cat"
-      args = ["/sys/fs/cgroup/nomad.slice/${NOMAD_ALLOC_ID}.${NOMAD_TASK_NAME}.scope/memory.max"]
+      args = ["/sys/fs/cgroup/nomad.slice/share.slice/${NOMAD_ALLOC_ID}.${NOMAD_TASK_NAME}.scope/memory.max"]
       promises = "stdio rpath"
       unveil = ["r:/sys/fs/cgroup/nomad.slice"]
     }
@@ -36,7 +36,7 @@ job "resources" {
     driver = "pledge"
     config {
       command = "/bin/cat"
-      args = ["/sys/fs/cgroup/nomad.slice/${NOMAD_ALLOC_ID}.${NOMAD_TASK_NAME}.scope/memory.low"]
+      args = ["/sys/fs/cgroup/nomad.slice/share.slice/${NOMAD_ALLOC_ID}.${NOMAD_TASK_NAME}.scope/memory.low"]
       promises = "stdio rpath"
       unveil = ["r:/sys/fs/cgroup/nomad.slice"]
     }
@@ -51,7 +51,7 @@ job "resources" {
     driver = "pledge"
    config {
       command = "/bin/cat"
-      args = ["/sys/fs/cgroup/nomad.slice/${NOMAD_ALLOC_ID}.${NOMAD_TASK_NAME}.scope/cpu.max"]
+      args = ["/sys/fs/cgroup/nomad.slice/share.slice/${NOMAD_ALLOC_ID}.${NOMAD_TASK_NAME}.scope/cpu.max"]
       promises = "stdio rpath"
       unveil = ["r:/sys/fs/cgroup/nomad.slice"]
     }
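None of these example tasks reserve CPU cores, so under Nomad 1.7 their scopes all land in the new share.slice level (tasks with reserved cores land in reserve.slice instead; see the driver change below). A minimal standalone sketch of what each cat task does, assuming cgroup v2 and using hypothetical placeholders for the NOMAD_ALLOC_ID and NOMAD_TASK_NAME values that Nomad interpolates at runtime:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	allocID := "example-alloc" // hypothetical placeholder for NOMAD_ALLOC_ID
	task := "example-task"     // hypothetical placeholder for NOMAD_TASK_NAME

	// Build the Nomad 1.7 share.slice path used in the jobspec above.
	path := filepath.Join(
		"/sys/fs/cgroup/nomad.slice/share.slice",
		fmt.Sprintf("%s.%s.scope", allocID, task),
		"memory.max",
	)

	// memory.max holds the cgroup v2 hard memory limit in bytes,
	// or the literal string "max" when no limit is set.
	b, err := os.ReadFile(path)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Print(string(b))
}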
17 changes: 12 additions & 5 deletions pkg/plugin/driver.go
@@ -279,7 +279,8 @@ func (p *PledgeDriver) StartTask(config *drivers.TaskConfig) (*drivers.TaskHandl
 		return nil, nil, fmt.Errorf("failed to compute cpu bandwidth: %w", err)
 	}
 
-	p.logger.Trace("resources", "memory", memory, "memory_max", memoryMax, "bandwidth", bandwidth)
+	cores := config.Resources.NomadResources.Cpu.ReservedCores
+	p.logger.Trace("resources", "memory", memory, "memory_max", memoryMax, "bandwidth", bandwidth, "cores", cores)
 
 	// create the environment for pledge
 	env := &pledge.Environment{
@@ -288,7 +289,7 @@ func (p *PledgeDriver) StartTask(config *drivers.TaskConfig) (*drivers.TaskHandl
 		Env:       config.Env,
 		Dir:       config.TaskDir().Dir,
 		User:      config.User,
-		Cgroup:    p.cgroup(config.AllocID, config.Name),
+		Cgroup:    p.cgroup(config.AllocID, config.Name, cores),
 		Net:       netns(config),
 		Memory:    memory,
 		MemoryMax: memoryMax,
@@ -352,14 +353,17 @@ func (p *PledgeDriver) RecoverTask(handle *drivers.TaskHandle) error {
 
 	taskState.TaskConfig = handle.Config.Copy()
 
+	// cores reserved, if any
+	cores := taskState.TaskConfig.Resources.NomadResources.Cpu.ReservedCores
+
 	// re-create the environment for pledge
 	env := &pledge.Environment{
 		Out:    util.NullCloser(nil),
 		Err:    util.NullCloser(nil),
 		Env:    handle.Config.Env,
 		Dir:    handle.Config.TaskDir().Dir,
 		User:   handle.Config.User,
-		Cgroup: p.cgroup(handle.Config.AllocID, handle.Config.Name),
+		Cgroup: p.cgroup(handle.Config.AllocID, handle.Config.Name, cores),
 	}
 
 	runner := pledge.Recover(taskState.PID, env)
@@ -515,6 +519,9 @@ func (p *PledgeDriver) ExecTask(taskID string, cmd []string, timeout time.Durati
 	return nil, fmt.Errorf("ExecTask not implemented")
 }
 
-func (*PledgeDriver) cgroup(allocID, task string) string {
-	return fmt.Sprintf("/sys/fs/cgroup/nomad.slice/%s.%s.scope", allocID, task)
+func (*PledgeDriver) cgroup(allocID, task string, cores []uint16) string {
+	if len(cores) == 0 {
+		return fmt.Sprintf("/sys/fs/cgroup/nomad.slice/share.slice/%s.%s.scope", allocID, task)
+	}
+	return fmt.Sprintf("/sys/fs/cgroup/nomad.slice/reserve.slice/%s.%s.scope", allocID, task)
 }
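The new cores parameter is what selects between the two Nomad 1.7 slices: a task with no reserved cores is placed under nomad.slice/share.slice, while a task that reserves cores (resources { cores = N } in the jobspec) is placed under nomad.slice/reserve.slice. A minimal sketch of that selection, extracted as a standalone function with hypothetical inputs so it can run outside the driver:

package main

import "fmt"

// cgroupPath mirrors the driver's updated cgroup method: no reserved
// cores means the task scope lives under share.slice; any reserved
// cores move it under reserve.slice.
func cgroupPath(allocID, task string, cores []uint16) string {
	if len(cores) == 0 {
		return fmt.Sprintf("/sys/fs/cgroup/nomad.slice/share.slice/%s.%s.scope", allocID, task)
	}
	return fmt.Sprintf("/sys/fs/cgroup/nomad.slice/reserve.slice/%s.%s.scope", allocID, task)
}

func main() {
	// Hypothetical alloc ID and task name.
	fmt.Println(cgroupPath("example-alloc", "web", nil))            // .../share.slice/example-alloc.web.scope
	fmt.Println(cgroupPath("example-alloc", "web", []uint16{0, 1})) // .../reserve.slice/example-alloc.web.scope
}

This is also why RecoverTask now recomputes cores from the stored task config: a task recovered after a client restart must resolve to the same slice it was started in.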
