Update Origin admission controllers to handle init containers
smarterclayton committed Jul 21, 2016
1 parent 8e37516 commit ce6c548
Showing 6 changed files with 105 additions and 44 deletions.
4 changes: 4 additions & 0 deletions pkg/build/admission/overrides/admission.go
@@ -91,6 +91,10 @@ func applyForcePullToPod(attributes admission.Attributes) error {
if err != nil {
return err
}
for i := range pod.Spec.InitContainers {
glog.V(5).Infof("Setting ImagePullPolicy to PullAlways on init container %s of pod %s/%s", pod.Spec.InitContainers[i].Name, pod.Namespace, pod.Name)
pod.Spec.InitContainers[i].ImagePullPolicy = kapi.PullAlways
}
for i := range pod.Spec.Containers {
glog.V(5).Infof("Setting ImagePullPolicy to PullAlways on container %s of pod %s/%s", pod.Spec.Containers[i].Name, pod.Namespace, pod.Name)
pod.Spec.Containers[i].ImagePullPolicy = kapi.PullAlways
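As an aside (an illustrative sketch, not part of this commit), the two loops above could also be expressed through one helper applied to both container slices; setPullAlways is a hypothetical name:

// setPullAlways forces PullAlways on every container in the given slice.
// Mutating by index works because the slice shares its backing array with
// the pod spec.
func setPullAlways(containers []kapi.Container) {
	for i := range containers {
		containers[i].ImagePullPolicy = kapi.PullAlways
	}
}

// setPullAlways(pod.Spec.InitContainers)
// setPullAlways(pod.Spec.Containers)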
3 changes: 3 additions & 0 deletions pkg/build/admission/overrides/admission_test.go
@@ -51,6 +51,9 @@ func TestBuildOverrideForcePull(t *testing.T) {
if pod.Spec.Containers[0].ImagePullPolicy != kapi.PullAlways {
t.Errorf("%s (%s): image pull policy is not PullAlways", test.name, op)
}
if pod.Spec.InitContainers[0].ImagePullPolicy != kapi.PullAlways {
t.Errorf("%s (%s): image pull policy is not PullAlways", test.name, op)
}
case strategy.DockerStrategy != nil:
if strategy.DockerStrategy.ForcePull == false {
t.Errorf("%s (%s): force pull was false", test.name, op)
16 changes: 16 additions & 0 deletions pkg/build/admission/testutil/pod.go
@@ -26,9 +26,13 @@ func (p *TestPod) WithAnnotation(name, value string) *TestPod {
}

func (p *TestPod) WithEnvVar(name, value string) *TestPod {
if len(p.Spec.InitContainers) == 0 {
p.Spec.InitContainers = append(p.Spec.InitContainers, kapi.Container{})
}
if len(p.Spec.Containers) == 0 {
p.Spec.Containers = append(p.Spec.Containers, kapi.Container{})
}
p.Spec.InitContainers[0].Env = append(p.Spec.InitContainers[0].Env, kapi.EnvVar{Name: name, Value: value})
p.Spec.Containers[0].Env = append(p.Spec.Containers[0].Env, kapi.EnvVar{Name: name, Value: value})
return p
}
@@ -46,6 +50,18 @@ func (p *TestPod) WithBuild(t *testing.T, build *buildapi.Build, version string)
return p.WithAnnotation(buildapi.BuildAnnotation, build.Name).WithEnvVar("BUILD", string(encodedBuild))
}

func (p *TestPod) InitEnvValue(name string) string {
if len(p.Spec.InitContainers) == 0 {
return ""
}
for _, ev := range p.Spec.InitContainers[0].Env {
if ev.Name == name {
return ev.Value
}
}
return ""
}

func (p *TestPod) EnvValue(name string) string {
if len(p.Spec.Containers) == 0 {
return ""
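A short usage sketch of the builder after this change (the package alias u, the Pod() constructor name, and the surrounding testing.T are assumptions): WithEnvVar now seeds both container lists, so EnvValue and InitEnvValue return the same value.

// Inside a test function with t *testing.T in scope:
pod := u.Pod().WithEnvVar("BUILD", "encoded-build")
if pod.EnvValue("BUILD") != "encoded-build" || pod.InitEnvValue("BUILD") != "encoded-build" {
	t.Fatalf("WithEnvVar should seed both the app container and the init container")
}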
91 changes: 49 additions & 42 deletions pkg/quota/admission/clusterresourceoverride/admission.go
@@ -155,54 +155,61 @@ func (a *clusterResourceOverridePlugin) Admit(attr admission.Attributes) error {

// Reuse LimitRanger logic to apply limit/req defaults from the project. Ignore validation
// errors, assume that LimitRanger will run after this plugin to validate.
glog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
glog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec)
if err := a.LimitRanger.Admit(attr); err != nil {
glog.V(5).Infof("%s: error from LimitRanger: %#v", api.PluginName, err)
}
glog.V(5).Infof("%s: pod limits after LimitRanger are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
for _, container := range pod.Spec.Containers {
resources := container.Resources
memLimit, memFound := resources.Limits[kapi.ResourceMemory]
if memFound && a.config.memoryRequestToLimitRatio != 0 {
// memory is measured in whole bytes.
// the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users.
amount := memLimit.Value() * int64(a.config.memoryRequestToLimitRatio*100) / 100
// TODO: move into resource.Quantity
var mod int64
switch memLimit.Format {
case resource.BinarySI:
mod = 1024 * 1024
default:
mod = 1000 * 1000
}
if rem := amount % mod; rem != 0 {
amount = amount - rem
}
q := resource.NewQuantity(int64(amount), memLimit.Format)
if memFloor.Cmp(*q) > 0 {
q = memFloor.Copy()
}
resources.Requests[kapi.ResourceMemory] = *q
for i := range pod.Spec.InitContainers {
updateContainerResources(a.config, &pod.Spec.InitContainers[i])
}
for i := range pod.Spec.Containers {
updateContainerResources(a.config, &pod.Spec.Containers[i])
}
glog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec)
return nil
}

func updateContainerResources(config *internalConfig, container *kapi.Container) {
resources := container.Resources
memLimit, memFound := resources.Limits[kapi.ResourceMemory]
if memFound && config.memoryRequestToLimitRatio != 0 {
// memory is measured in whole bytes.
// the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users.
amount := memLimit.Value() * int64(config.memoryRequestToLimitRatio*100) / 100
// TODO: move into resource.Quantity
var mod int64
switch memLimit.Format {
case resource.BinarySI:
mod = 1024 * 1024
default:
mod = 1000 * 1000
}
if rem := amount % mod; rem != 0 {
amount = amount - rem
}
if memFound && a.config.limitCPUToMemoryRatio != 0 {
amount := float64(memLimit.Value()) * a.config.limitCPUToMemoryRatio * cpuBaseScaleFactor
q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI)
if cpuFloor.Cmp(*q) > 0 {
q = cpuFloor.Copy()
}
resources.Limits[kapi.ResourceCPU] = *q
q := resource.NewQuantity(int64(amount), memLimit.Format)
if memFloor.Cmp(*q) > 0 {
q = memFloor.Copy()
}
resources.Requests[kapi.ResourceMemory] = *q
}
if memFound && config.limitCPUToMemoryRatio != 0 {
amount := float64(memLimit.Value()) * config.limitCPUToMemoryRatio * cpuBaseScaleFactor
q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI)
if cpuFloor.Cmp(*q) > 0 {
q = cpuFloor.Copy()
}
resources.Limits[kapi.ResourceCPU] = *q
}

cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU]
if cpuFound && a.config.cpuRequestToLimitRatio != 0 {
amount := float64(cpuLimit.MilliValue()) * a.config.cpuRequestToLimitRatio
q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format)
if cpuFloor.Cmp(*q) > 0 {
q = cpuFloor.Copy()
}
resources.Requests[kapi.ResourceCPU] = *q
cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU]
if cpuFound && config.cpuRequestToLimitRatio != 0 {
amount := float64(cpuLimit.MilliValue()) * config.cpuRequestToLimitRatio
q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format)
if cpuFloor.Cmp(*q) > 0 {
q = cpuFloor.Copy()
}
resources.Requests[kapi.ResourceCPU] = *q
}
glog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
return nil

}
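To make the memory rounding concrete, here is a standalone sketch (the vendored import path and the example values 512Mi and 0.5 are assumptions): with memoryRequestToLimitRatio set to 0.5 and a binary-SI limit of 512Mi, the computed request is 256Mi, and any sub-MiB remainder would be dropped.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	memLimit := resource.MustParse("512Mi") // container memory limit (binary SI)
	ratio := 0.5                            // memoryRequestToLimitRatio from the plugin config

	// Same arithmetic as updateContainerResources: scale the limit, then
	// round down to a whole MiB for binary-SI quantities.
	amount := memLimit.Value() * int64(ratio*100) / 100
	mod := int64(1024 * 1024)
	amount -= amount % mod

	fmt.Println(resource.NewQuantity(amount, memLimit.Format).String()) // 256Mi
}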
32 changes: 31 additions & 1 deletion pkg/quota/admission/clusterresourceoverride/admission_test.go
@@ -225,7 +225,18 @@ func TestLimitRequestAdmission(t *testing.T) {
t.Errorf("%s: admission controller returned error: %v", test.name, err)
continue
}
resources := test.pod.Spec.Containers[0].Resources // only test one container
resources := test.pod.Spec.InitContainers[0].Resources // check the first init container
if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 {
t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest)
}
if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 {
t.Errorf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest)
}
if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 {
t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit)
}

resources = test.pod.Spec.Containers[0].Resources // check the first app container
if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 {
t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest)
}
@@ -241,6 +252,11 @@
func testBestEffortPod() *kapi.Pod {
return &kapi.Pod{
Spec: kapi.PodSpec{
InitContainers: []kapi.Container{
{
Resources: kapi.ResourceRequirements{},
},
},
Containers: []kapi.Container{
{
Resources: kapi.ResourceRequirements{},
@@ -253,6 +269,20 @@ func testBestEffortPod() *kapi.Pod {
func testPod(memLimit string, memRequest string, cpuLimit string, cpuRequest string) *kapi.Pod {
return &kapi.Pod{
Spec: kapi.PodSpec{
InitContainers: []kapi.Container{
{
Resources: kapi.ResourceRequirements{
Limits: kapi.ResourceList{
kapi.ResourceCPU: resource.MustParse(cpuLimit),
kapi.ResourceMemory: resource.MustParse(memLimit),
},
Requests: kapi.ResourceList{
kapi.ResourceCPU: resource.MustParse(cpuRequest),
kapi.ResourceMemory: resource.MustParse(memRequest),
},
},
},
},
Containers: []kapi.Container{
{
Resources: kapi.ResourceRequirements{
Expand Down
3 changes: 2 additions & 1 deletion test/extended/setup.sh
@@ -34,7 +34,7 @@ function os::test::extended::setup {
if [[ -z $(os::build::find-binary openshift) ]]; then
hack/build-go.sh
fi

os::util::environment::setup_time_vars

# ensure proper relative directories are set
@@ -228,6 +228,7 @@ readonly CONFORMANCE_TESTS=(
"\[volumes\] Test local storage quota FSGroup"
"test deployment should run a deployment to completion"
"Variable Expansion"
"init containers"
"Clean up pods on node kubelet"
"\[Feature\:SecurityContext\]"
"should create a LimitRange with defaults"
