From ce6c5480ded5c14688e5217b2ee3283538c613af Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 20 Jul 2016 23:06:56 -0400 Subject: [PATCH] Update Origin admission controllers to handle init containers --- pkg/build/admission/overrides/admission.go | 4 + .../admission/overrides/admission_test.go | 3 + pkg/build/admission/testutil/pod.go | 16 ++++ .../clusterresourceoverride/admission.go | 91 ++++++++++--------- .../clusterresourceoverride/admission_test.go | 32 ++++++- test/extended/setup.sh | 3 +- 6 files changed, 105 insertions(+), 44 deletions(-) diff --git a/pkg/build/admission/overrides/admission.go b/pkg/build/admission/overrides/admission.go index 40a62ae1f5dc..d4ac5862065c 100644 --- a/pkg/build/admission/overrides/admission.go +++ b/pkg/build/admission/overrides/admission.go @@ -91,6 +91,10 @@ func applyForcePullToPod(attributes admission.Attributes) error { if err != nil { return err } + for i := range pod.Spec.InitContainers { + glog.V(5).Infof("Setting ImagePullPolicy to PullAlways on init container %s of pod %s/%s", pod.Spec.InitContainers[i].Name, pod.Namespace, pod.Name) + pod.Spec.InitContainers[i].ImagePullPolicy = kapi.PullAlways + } for i := range pod.Spec.Containers { glog.V(5).Infof("Setting ImagePullPolicy to PullAlways on container %s of pod %s/%s", pod.Spec.Containers[i].Name, pod.Namespace, pod.Name) pod.Spec.Containers[i].ImagePullPolicy = kapi.PullAlways diff --git a/pkg/build/admission/overrides/admission_test.go b/pkg/build/admission/overrides/admission_test.go index 0bd3759f8655..ebac5a378558 100644 --- a/pkg/build/admission/overrides/admission_test.go +++ b/pkg/build/admission/overrides/admission_test.go @@ -51,6 +51,9 @@ func TestBuildOverrideForcePull(t *testing.T) { if pod.Spec.Containers[0].ImagePullPolicy != kapi.PullAlways { t.Errorf("%s (%s): image pull policy is not PullAlways", test.name, op) } + if pod.Spec.InitContainers[0].ImagePullPolicy != kapi.PullAlways { + t.Errorf("%s (%s): image pull policy is not PullAlways", test.name, op) + } case strategy.DockerStrategy != nil: if strategy.DockerStrategy.ForcePull == false { t.Errorf("%s (%s): force pull was false", test.name, op) diff --git a/pkg/build/admission/testutil/pod.go b/pkg/build/admission/testutil/pod.go index 6b65f49e1f28..7d74fdace477 100644 --- a/pkg/build/admission/testutil/pod.go +++ b/pkg/build/admission/testutil/pod.go @@ -26,9 +26,13 @@ func (p *TestPod) WithAnnotation(name, value string) *TestPod { } func (p *TestPod) WithEnvVar(name, value string) *TestPod { + if len(p.Spec.InitContainers) == 0 { + p.Spec.InitContainers = append(p.Spec.InitContainers, kapi.Container{}) + } if len(p.Spec.Containers) == 0 { p.Spec.Containers = append(p.Spec.Containers, kapi.Container{}) } + p.Spec.InitContainers[0].Env = append(p.Spec.InitContainers[0].Env, kapi.EnvVar{Name: name, Value: value}) p.Spec.Containers[0].Env = append(p.Spec.Containers[0].Env, kapi.EnvVar{Name: name, Value: value}) return p } @@ -46,6 +50,18 @@ func (p *TestPod) WithBuild(t *testing.T, build *buildapi.Build, version string) return p.WithAnnotation(buildapi.BuildAnnotation, build.Name).WithEnvVar("BUILD", string(encodedBuild)) } +func (p *TestPod) InitEnvValue(name string) string { + if len(p.Spec.InitContainers) == 0 { + return "" + } + for _, ev := range p.Spec.InitContainers[0].Env { + if ev.Name == name { + return ev.Value + } + } + return "" +} + func (p *TestPod) EnvValue(name string) string { if len(p.Spec.Containers) == 0 { return "" diff --git a/pkg/quota/admission/clusterresourceoverride/admission.go b/pkg/quota/admission/clusterresourceoverride/admission.go index eaea370ba948..ec52b1583d72 100644 --- a/pkg/quota/admission/clusterresourceoverride/admission.go +++ b/pkg/quota/admission/clusterresourceoverride/admission.go @@ -155,54 +155,61 @@ func (a *clusterResourceOverridePlugin) Admit(attr admission.Attributes) error { // Reuse LimitRanger logic to apply limit/req defaults from the project. Ignore validation // errors, assume that LimitRanger will run after this plugin to validate. - glog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec.Containers[0].Resources) + glog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec) if err := a.LimitRanger.Admit(attr); err != nil { glog.V(5).Infof("%s: error from LimitRanger: %#v", api.PluginName, err) } - glog.V(5).Infof("%s: pod limits after LimitRanger are: %#v", api.PluginName, pod.Spec.Containers[0].Resources) - for _, container := range pod.Spec.Containers { - resources := container.Resources - memLimit, memFound := resources.Limits[kapi.ResourceMemory] - if memFound && a.config.memoryRequestToLimitRatio != 0 { - // memory is measured in whole bytes. - // the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users. - amount := memLimit.Value() * int64(a.config.memoryRequestToLimitRatio*100) / 100 - // TODO: move into resource.Quantity - var mod int64 - switch memLimit.Format { - case resource.BinarySI: - mod = 1024 * 1024 - default: - mod = 1000 * 1000 - } - if rem := amount % mod; rem != 0 { - amount = amount - rem - } - q := resource.NewQuantity(int64(amount), memLimit.Format) - if memFloor.Cmp(*q) > 0 { - q = memFloor.Copy() - } - resources.Requests[kapi.ResourceMemory] = *q + for i := range pod.Spec.InitContainers { + updateContainerResources(a.config, &pod.Spec.InitContainers[i]) + } + for i := range pod.Spec.Containers { + updateContainerResources(a.config, &pod.Spec.Containers[i]) + } + glog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec) + return nil +} + +func updateContainerResources(config *internalConfig, container *kapi.Container) { + resources := container.Resources + memLimit, memFound := resources.Limits[kapi.ResourceMemory] + if memFound && config.memoryRequestToLimitRatio != 0 { + // memory is measured in whole bytes. 
+ // the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users. + amount := memLimit.Value() * int64(config.memoryRequestToLimitRatio*100) / 100 + // TODO: move into resource.Quantity + var mod int64 + switch memLimit.Format { + case resource.BinarySI: + mod = 1024 * 1024 + default: + mod = 1000 * 1000 + } + if rem := amount % mod; rem != 0 { + amount = amount - rem } - if memFound && a.config.limitCPUToMemoryRatio != 0 { - amount := float64(memLimit.Value()) * a.config.limitCPUToMemoryRatio * cpuBaseScaleFactor - q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI) - if cpuFloor.Cmp(*q) > 0 { - q = cpuFloor.Copy() - } - resources.Limits[kapi.ResourceCPU] = *q + q := resource.NewQuantity(int64(amount), memLimit.Format) + if memFloor.Cmp(*q) > 0 { + q = memFloor.Copy() } + resources.Requests[kapi.ResourceMemory] = *q + } + if memFound && config.limitCPUToMemoryRatio != 0 { + amount := float64(memLimit.Value()) * config.limitCPUToMemoryRatio * cpuBaseScaleFactor + q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI) + if cpuFloor.Cmp(*q) > 0 { + q = cpuFloor.Copy() + } + resources.Limits[kapi.ResourceCPU] = *q + } - cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU] - if cpuFound && a.config.cpuRequestToLimitRatio != 0 { - amount := float64(cpuLimit.MilliValue()) * a.config.cpuRequestToLimitRatio - q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format) - if cpuFloor.Cmp(*q) > 0 { - q = cpuFloor.Copy() - } - resources.Requests[kapi.ResourceCPU] = *q + cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU] + if cpuFound && config.cpuRequestToLimitRatio != 0 { + amount := float64(cpuLimit.MilliValue()) * config.cpuRequestToLimitRatio + q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format) + if cpuFloor.Cmp(*q) > 0 { + q = cpuFloor.Copy() } + resources.Requests[kapi.ResourceCPU] = *q } - glog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec.Containers[0].Resources) - return nil + } diff --git a/pkg/quota/admission/clusterresourceoverride/admission_test.go b/pkg/quota/admission/clusterresourceoverride/admission_test.go index 56fb33b279d5..1d4eb58217ec 100644 --- a/pkg/quota/admission/clusterresourceoverride/admission_test.go +++ b/pkg/quota/admission/clusterresourceoverride/admission_test.go @@ -225,7 +225,18 @@ func TestLimitRequestAdmission(t *testing.T) { t.Errorf("%s: admission controller returned error: %v", test.name, err) continue } - resources := test.pod.Spec.Containers[0].Resources // only test one container + resources := test.pod.Spec.InitContainers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 { + t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest) + } + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Errorf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 { + t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit) + } + + resources = test.pod.Spec.Containers[0].Resources // only test one container if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 { t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest) } @@ -241,6 +252,11 @@ func TestLimitRequestAdmission(t *testing.T) { func testBestEffortPod() *kapi.Pod { return &kapi.Pod{ Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{}, + }, + }, Containers: []kapi.Container{ { Resources: kapi.ResourceRequirements{}, @@ -253,6 +269,20 @@ func testBestEffortPod() *kapi.Pod { func testPod(memLimit string, memRequest string, cpuLimit string, cpuRequest string) *kapi.Pod { return &kapi.Pod{ Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{ + Limits: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuLimit), + kapi.ResourceMemory: resource.MustParse(memLimit), + }, + Requests: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuRequest), + kapi.ResourceMemory: resource.MustParse(memRequest), + }, + }, + }, + }, Containers: []kapi.Container{ { Resources: kapi.ResourceRequirements{ diff --git a/test/extended/setup.sh b/test/extended/setup.sh index 4c28079ebf2b..07aa6adacc97 100644 --- a/test/extended/setup.sh +++ b/test/extended/setup.sh @@ -34,7 +34,7 @@ function os::test::extended::setup { if [[ -z $(os::build::find-binary openshift) ]]; then hack/build-go.sh fi - + os::util::environment::setup_time_vars # ensure proper relative directories are set @@ -228,6 +228,7 @@ readonly CONFORMANCE_TESTS=( "\[volumes\] Test local storage quota FSGroup" "test deployment should run a deployment to completion" "Variable Expansion" + "init containers" "Clean up pods on node kubelet" "\[Feature\:SecurityContext\]" "should create a LimitRange with defaults"