Enable security limits around init containers #9973

Merged: 4 commits, Jul 26, 2016
Changes from all commits
4 changes: 4 additions & 0 deletions pkg/build/admission/overrides/admission.go
@@ -91,6 +91,10 @@ func applyForcePullToPod(attributes admission.Attributes) error {
if err != nil {
return err
}
for i := range pod.Spec.InitContainers {
glog.V(5).Infof("Setting ImagePullPolicy to PullAlways on init container %s of pod %s/%s", pod.Spec.InitContainers[i].Name, pod.Namespace, pod.Name)
pod.Spec.InitContainers[i].ImagePullPolicy = kapi.PullAlways
}
for i := range pod.Spec.Containers {
glog.V(5).Infof("Setting ImagePullPolicy to PullAlways on container %s of pod %s/%s", pod.Spec.Containers[i].Name, pod.Namespace, pod.Name)
pod.Spec.Containers[i].ImagePullPolicy = kapi.PullAlways
3 changes: 3 additions & 0 deletions pkg/build/admission/overrides/admission_test.go
@@ -51,6 +51,9 @@ func TestBuildOverrideForcePull(t *testing.T) {
if pod.Spec.Containers[0].ImagePullPolicy != kapi.PullAlways {
t.Errorf("%s (%s): image pull policy is not PullAlways", test.name, op)
}
if pod.Spec.InitContainers[0].ImagePullPolicy != kapi.PullAlways {
t.Errorf("%s (%s): image pull policy is not PullAlways", test.name, op)
}
case strategy.DockerStrategy != nil:
if strategy.DockerStrategy.ForcePull == false {
t.Errorf("%s (%s): force pull was false", test.name, op)
16 changes: 16 additions & 0 deletions pkg/build/admission/testutil/pod.go
@@ -26,9 +26,13 @@ func (p *TestPod) WithAnnotation(name, value string) *TestPod {
}

func (p *TestPod) WithEnvVar(name, value string) *TestPod {
if len(p.Spec.InitContainers) == 0 {
p.Spec.InitContainers = append(p.Spec.InitContainers, kapi.Container{})
}
if len(p.Spec.Containers) == 0 {
p.Spec.Containers = append(p.Spec.Containers, kapi.Container{})
}
p.Spec.InitContainers[0].Env = append(p.Spec.InitContainers[0].Env, kapi.EnvVar{Name: name, Value: value})
p.Spec.Containers[0].Env = append(p.Spec.Containers[0].Env, kapi.EnvVar{Name: name, Value: value})
return p
}
@@ -46,6 +50,18 @@ func (p *TestPod) WithBuild(t *testing.T, build *buildapi.Build, version string)
return p.WithAnnotation(buildapi.BuildAnnotation, build.Name).WithEnvVar("BUILD", string(encodedBuild))
}

func (p *TestPod) InitEnvValue(name string) string {
Review comment: Used anywhere or just here for future work? Didn't see it being called.

Contributor Author: just copied to make sure it wasn't inconsistent.

if len(p.Spec.InitContainers) == 0 {
return ""
}
for _, ev := range p.Spec.InitContainers[0].Env {
if ev.Name == name {
return ev.Value
}
}
return ""
}

func (p *TestPod) EnvValue(name string) string {
if len(p.Spec.Containers) == 0 {
return ""
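To the question above about call sites: InitEnvValue is added for symmetry with EnvValue rather than being called anywhere in this PR. A hypothetical test usage might look like the sketch below (the Pod() constructor name and package alias are assumptions, not part of this diff):

// Sketch only: exercise the new helper alongside the existing EnvValue.
// Assumes the testutil package exposes a Pod() constructor for TestPod.
pod := testutil.Pod().WithEnvVar("BUILD", "{}")
if pod.InitEnvValue("BUILD") != pod.EnvValue("BUILD") {
	t.Errorf("init container and container BUILD env values should match")
}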
92 changes: 50 additions & 42 deletions pkg/quota/admission/clusterresourceoverride/admission.go
@@ -155,54 +155,62 @@ func (a *clusterResourceOverridePlugin) Admit(attr admission.Attributes) error {

// Reuse LimitRanger logic to apply limit/req defaults from the project. Ignore validation
// errors, assume that LimitRanger will run after this plugin to validate.
glog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
glog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec)
Contributor: Is there a tool to go from this output in a log to something more readable?

Contributor Author: Not that I'm aware of, since this can be across multiple containers, init containers, requests, and limits (and potentially annotations). Might be nice in the future.
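As a rough sketch of the more readable logging discussed above (an illustration, not something this PR adds), the spec could be marshalled to JSON before logging. The internal kapi types carry no JSON tags, so fields print under their Go names, but the output is still structured:

// Sketch only: log the pod spec as JSON instead of %#v for easier reading.
// Assumes "encoding/json" is imported.
if specJSON, err := json.Marshal(pod.Spec); err == nil {
	glog.V(5).Infof("%s: initial pod spec: %s", api.PluginName, specJSON)
}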

if err := a.LimitRanger.Admit(attr); err != nil {
glog.V(5).Infof("%s: error from LimitRanger: %#v", api.PluginName, err)
}
glog.V(5).Infof("%s: pod limits after LimitRanger are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
for _, container := range pod.Spec.Containers {
resources := container.Resources
memLimit, memFound := resources.Limits[kapi.ResourceMemory]
if memFound && a.config.memoryRequestToLimitRatio != 0 {
// memory is measured in whole bytes.
// the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users.
amount := memLimit.Value() * int64(a.config.memoryRequestToLimitRatio*100) / 100
// TODO: move into resource.Quantity
var mod int64
switch memLimit.Format {
case resource.BinarySI:
mod = 1024 * 1024
default:
mod = 1000 * 1000
}
if rem := amount % mod; rem != 0 {
amount = amount - rem
}
q := resource.NewQuantity(int64(amount), memLimit.Format)
if memFloor.Cmp(*q) > 0 {
q = memFloor.Copy()
}
resources.Requests[kapi.ResourceMemory] = *q
glog.V(5).Infof("%s: pod limits after LimitRanger: %#v", api.PluginName, pod.Spec)
Review comment: We're no longer logging the post-LimitRanger limits. Intentional?

Contributor Author: No

for i := range pod.Spec.InitContainers {
updateContainerResources(a.config, &pod.Spec.InitContainers[i])
}
for i := range pod.Spec.Containers {
updateContainerResources(a.config, &pod.Spec.Containers[i])
}
glog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec)
return nil
}

func updateContainerResources(config *internalConfig, container *kapi.Container) {
resources := container.Resources
memLimit, memFound := resources.Limits[kapi.ResourceMemory]
if memFound && config.memoryRequestToLimitRatio != 0 {
// memory is measured in whole bytes.
// the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users.
amount := memLimit.Value() * int64(config.memoryRequestToLimitRatio*100) / 100
// TODO: move into resource.Quantity
var mod int64
switch memLimit.Format {
case resource.BinarySI:
mod = 1024 * 1024
default:
mod = 1000 * 1000
}
if rem := amount % mod; rem != 0 {
amount = amount - rem
}
if memFound && a.config.limitCPUToMemoryRatio != 0 {
amount := float64(memLimit.Value()) * a.config.limitCPUToMemoryRatio * cpuBaseScaleFactor
q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI)
if cpuFloor.Cmp(*q) > 0 {
q = cpuFloor.Copy()
}
resources.Limits[kapi.ResourceCPU] = *q
q := resource.NewQuantity(int64(amount), memLimit.Format)
if memFloor.Cmp(*q) > 0 {
q = memFloor.Copy()
}
resources.Requests[kapi.ResourceMemory] = *q
}
if memFound && config.limitCPUToMemoryRatio != 0 {
amount := float64(memLimit.Value()) * config.limitCPUToMemoryRatio * cpuBaseScaleFactor
q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI)
if cpuFloor.Cmp(*q) > 0 {
q = cpuFloor.Copy()
}
resources.Limits[kapi.ResourceCPU] = *q
}

cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU]
if cpuFound && a.config.cpuRequestToLimitRatio != 0 {
amount := float64(cpuLimit.MilliValue()) * a.config.cpuRequestToLimitRatio
q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format)
if cpuFloor.Cmp(*q) > 0 {
q = cpuFloor.Copy()
}
resources.Requests[kapi.ResourceCPU] = *q
cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU]
if cpuFound && config.cpuRequestToLimitRatio != 0 {
amount := float64(cpuLimit.MilliValue()) * config.cpuRequestToLimitRatio
q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format)
if cpuFloor.Cmp(*q) > 0 {
q = cpuFloor.Copy()
}
resources.Requests[kapi.ResourceCPU] = *q
}
glog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
return nil

}
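To illustrate the rounding that updateContainerResources performs (a standalone sketch, not part of the diff): with a 1Gi memory limit and a memoryRequestToLimitRatio of 0.33, the request is scaled by the ratio and then rounded down to a whole MiB.

package main

import "fmt"

func main() {
	memLimitBytes := int64(1 << 30) // 1Gi limit in a BinarySI quantity
	ratio := int64(33)              // memoryRequestToLimitRatio = 0.33, applied as a whole percent
	amount := memLimitBytes * ratio / 100
	const mod = 1024 * 1024 // BinarySI rounds to MiB; DecimalSI would use 1000 * 1000
	amount -= amount % mod
	fmt.Println(amount) // 353370112 bytes == 337Mi, rounded down from 354334801
}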
32 changes: 31 additions & 1 deletion pkg/quota/admission/clusterresourceoverride/admission_test.go
@@ -225,7 +225,18 @@ func TestLimitRequestAdmission(t *testing.T) {
t.Errorf("%s: admission controller returned error: %v", test.name, err)
continue
}
resources := test.pod.Spec.Containers[0].Resources // only test one container
resources := test.pod.Spec.InitContainers[0].Resources // only test one container
if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 {
t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest)
}
if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 {
t.Errorf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest)
}
if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 {
t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit)
}

resources = test.pod.Spec.Containers[0].Resources // only test one container
if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 {
t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest)
}
@@ -241,6 +252,11 @@
func testBestEffortPod() *kapi.Pod {
return &kapi.Pod{
Spec: kapi.PodSpec{
InitContainers: []kapi.Container{
{
Resources: kapi.ResourceRequirements{},
},
},
Containers: []kapi.Container{
{
Resources: kapi.ResourceRequirements{},
@@ -253,6 +269,20 @@
func testPod(memLimit string, memRequest string, cpuLimit string, cpuRequest string) *kapi.Pod {
return &kapi.Pod{
Spec: kapi.PodSpec{
InitContainers: []kapi.Container{
{
Resources: kapi.ResourceRequirements{
Limits: kapi.ResourceList{
kapi.ResourceCPU: resource.MustParse(cpuLimit),
kapi.ResourceMemory: resource.MustParse(memLimit),
},
Requests: kapi.ResourceList{
kapi.ResourceCPU: resource.MustParse(cpuRequest),
kapi.ResourceMemory: resource.MustParse(memRequest),
},
},
},
},
Containers: []kapi.Container{
{
Resources: kapi.ResourceRequirements{
61 changes: 44 additions & 17 deletions pkg/security/admission/admission.go
@@ -165,7 +165,7 @@ func (c *constraint) Admit(a kadmission.Attributes) error {
// and validates that the sc falls within the scc constraints. All containers must validate against
// the same scc or is not considered valid.
func assignSecurityContext(provider scc.SecurityContextConstraintsProvider, pod *kapi.Pod, fldPath *field.Path) field.ErrorList {
generatedSCs := make([]*kapi.SecurityContext, len(pod.Spec.Containers))
generatedSCs := make([]*kapi.SecurityContext, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))

errs := field.ErrorList{}

@@ -183,24 +183,29 @@ func assignSecurityContext(provider scc.SecurityContextConstraintsProvider, pod

// Note: this is not changing the original container, we will set container SCs later so long
// as all containers validated under the same SCC.
for i, containerCopy := range pod.Spec.Containers {
// We will determine the effective security context for the container and validate against that
// since that is how the sc provider will eventually apply settings in the runtime.
// This results in an SC that is based on the Pod's PSC with the set fields from the container
// overriding pod level settings.
containerCopy.SecurityContext = sc.DetermineEffectiveSecurityContext(pod, &containerCopy)

sc, err := provider.CreateContainerSecurityContext(pod, &containerCopy)
if err != nil {
errs = append(errs, field.Invalid(field.NewPath("spec", "containers").Index(i).Child("securityContext"), "", err.Error()))
containerPath := field.NewPath("spec", "initContainers")
for i, containerCopy := range pod.Spec.InitContainers {
Contributor: This isn't the factorization I expected. Shouldn't InitContainers and Containers be handled the same way with a different path?

Contributor Author: You mean, you want a method? I'm fine with a more dramatic change here.

csc, resolutionErrs := resolveContainerSecurityContext(provider, pod, &containerCopy, containerPath.Index(i))
errs = append(errs, resolutionErrs...)
if len(resolutionErrs) > 0 {
continue
}
generatedSCs[i] = sc

containerCopy.SecurityContext = sc
errs = append(errs, provider.ValidateContainerSecurityContext(pod, &containerCopy, field.NewPath("spec", "containers").Index(i).Child("securityContext"))...)
generatedSCs[i] = csc
}

base := len(pod.Spec.InitContainers)

// Note: this is not changing the original container, we will set container SCs later so long
// as all containers validated under the same SCC.
containerPath = field.NewPath("spec", "containers")
for i, containerCopy := range pod.Spec.Containers {
csc, resolutionErrs := resolveContainerSecurityContext(provider, pod, &containerCopy, containerPath.Index(i))
errs = append(errs, resolutionErrs...)
if len(resolutionErrs) > 0 {
continue
}
generatedSCs[i+base] = csc
}
if len(errs) > 0 {
// ensure psc is not mutated if there are errors
pod.Spec.SecurityContext = originalPSC
@@ -209,12 +214,34 @@

// if we've reached this code then we've generated and validated an SC for every container in the
// pod so let's apply what we generated. Note: the psc is already applied.
for i, sc := range generatedSCs {
pod.Spec.Containers[i].SecurityContext = sc
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].SecurityContext = generatedSCs[i]
}
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].SecurityContext = generatedSCs[i+base]
}
return nil
}

// resolveContainerSecurityContext checks the provided container against the provider, returning any
// validation errors encountered on the resulting security context, or the security context that was
// resolved. The SecurityContext field of the container is updated, so ensure that a copy of the original
// container is passed here if you wish to preserve the original input.
func resolveContainerSecurityContext(provider scc.SecurityContextConstraintsProvider, pod *kapi.Pod, container *kapi.Container, path *field.Path) (*kapi.SecurityContext, field.ErrorList) {
// We will determine the effective security context for the container and validate against that
// since that is how the sc provider will eventually apply settings in the runtime.
// This results in an SC that is based on the Pod's PSC with the set fields from the container
// overriding pod level settings.
container.SecurityContext = sc.DetermineEffectiveSecurityContext(pod, container)

csc, err := provider.CreateContainerSecurityContext(pod, container)
if err != nil {
return nil, field.ErrorList{field.Invalid(path.Child("securityContext"), "", err.Error())}
}
container.SecurityContext = csc
return csc, provider.ValidateContainerSecurityContext(pod, container, path.Child("securityContext"))
}

// createProvidersFromConstraints creates providers from the constraints supplied, including
// looking up pre-allocated values if necessary using the pod's namespace.
func (c *constraint) createProvidersFromConstraints(ns string, sccs []*kapi.SecurityContextConstraints) ([]scc.SecurityContextConstraintsProvider, []error) {
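As a footnote to the factorization thread above: a further step the reviewer hints at could walk init containers and regular containers with a single visitor, differing only in the field path. This is a sketch of that idea, not what was merged:

// Sketch only: one loop body shared by both container lists.
func visitContainers(spec *kapi.PodSpec, visit func(path *field.Path, c *kapi.Container)) {
	initPath := field.NewPath("spec", "initContainers")
	for i := range spec.InitContainers {
		visit(initPath.Index(i), &spec.InitContainers[i])
	}
	ctrPath := field.NewPath("spec", "containers")
	for i := range spec.Containers {
		visit(ctrPath.Index(i), &spec.Containers[i])
	}
}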