From 51a996d5f41d0f73d7f1fd034f2c27e7f13d8cef Mon Sep 17 00:00:00 2001
From: David Eads
Date: Thu, 29 Oct 2020 13:55:56 +0100
Subject: [PATCH] UPSTREAM: : openshift-kube-apiserver: add openshift-kube-apiserver code

UPSTREAM: : openshift-kube-apiserver: enabled conversion gen for admission configs

UPSTREAM: : openshift-kube-apiserver/admission: fix featuregates resource name

UPSTREAM: : openshift-kube-apiserver/admission: add missing FeatureSets

UPSTREAM: : openshift-kube-apiserver: use github.com/openshift/apiserver-library-go/pkg/labelselector

UPSTREAM: : openshift authenticator: don't allow old-style tokens

UPSTREAM: : oauth-authn: support sha256 prefixed tokens

UPSTREAM: : oauth-token-authn: switch to sha256~ prefix

UPSTREAM: : oauth-token-authn: add sha256~ support to bootstrap authenticator

UPSTREAM: : remove the openshift authenticator from the apiserver

In 4.8, we moved the authenticator to be configured via webhookTokenAuthenticators to an endpoint in the oauth-apiserver; this should now be safe to remove.

UPSTREAM: : set ResourceQuotaValidationOptions to true

When PodAffinityNamespaceSelector goes to beta or GA, this might affect how our ClusterResourceQuota works.

UPSTREAM: : simplify the authorizer patch to allow the flags to function

UPSTREAM: : eliminate unnecessary closure in openshift configuration wiring

UPSTREAM: : add crdvalidation for apiserver.spec.tlsSecurityProfile

UPSTREAM: : openshift-kube-apiserver: Add custom resource validation for network spec

UPSTREAM: : stop overriding flags that are explicitly set

UPSTREAM: : add readyz check for openshift apiserver availability

UPSTREAM: : wait for oauth-apiserver accessibility

UPSTREAM: : provide a new admission plugin to mutate management pods' CPU requests

The ManagementCPUOverride admission plugin replaces pod container CPU requests with a new management resource. It applies to all pods that:
1. are in an allowed namespace, and
2. have the workload annotation.

It also sets the new management resource request and limit and sets a resource annotation that CRI-O can recognize and apply the relevant changes.
For more information, see https://github.com/openshift/enhancements/pull/703

Conditions for CPU request deletion:
1. The namespace should have the allowed annotation "workload.openshift.io/allowed": "management"
2. The pod should have the management annotation "workload.openshift.io/management": "{"effect": "PreferredDuringScheduling"}"
3. All nodes in the cluster should have the new management resource "management.workload.openshift.io/cores"
4. The CPU request deletion will not change the pod QoS class

UPSTREAM: : Do not prevent pod creation because of the "no nodes" reason when running on a regular cluster

Check the `cluster` infrastructure resource status to make sure that we run on top of an SNO cluster; if the pod runs on top of a regular cluster, exit before the node existence check.

UPSTREAM: : do not mutate pods that have a container with both a CPU request and a CPU limit

Removing the CPU request from a container that has a CPU limit will result in the defaulter setting the CPU request back equal to the CPU limit.

UPSTREAM: : Reject the pod creation when we can not decide the cluster type

A race condition is possible between pod creation and the update of the infrastructure resource status with the correct values under Status.ControlPlaneTopology and Status.InfrastructureTopology.

UPSTREAM: : add CRD validation for dnses

Add an admission plugin that validates the dnses.operator.openshift.io custom resource.
For now, the plugin only validates the DNS pod node-placement parameters.

This commit fixes bug 1967745.
https://bugzilla.redhat.com/show_bug.cgi?id=1967745

* openshift-kube-apiserver/admission/customresourcevalidation/attributes.go (init): Install operatorv1 into supportedObjectsScheme.
* openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go (AllCustomResourceValidators, RegisterCustomResourceValidation): Register the new plugin.
* openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go: New file.
(PluginName): New const.
(Register): New function. Register the plugin.
(toDNSV1): New function. Convert a runtime object to a versioned DNS.
(dnsV1): New type to represent a runtime object that is validated as a versioned DNS.
(ValidateCreate, ValidateUpdate, ValidateStatusUpdate): New methods. Implement the ObjectValidator interface, using the validateDNSSpecCreate and validateDNSSpecUpdate helpers.
(validateDNSSpecCreate, validateDNSSpecUpdate): New functions. Validate a DNS, using the validateDNSSpec helper.
(validateDNSSpec): New function. Validate the spec field of a DNS, using the validateDNSNodePlacement helper.
(validateDNSNodePlacement): New function. Validate the node selector and tolerations in a DNS's node-placement parameters, using validateTolerations.
(validateTolerations): New function. Validate a slice of corev1.Toleration.
* openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns_test.go: New file.
(TestFailValidateDNSSpec): Verify that validateDNSSpec rejects invalid DNS specs.
(TestSucceedValidateDNSSpec): Verify that validateDNSSpec accepts valid DNS specs.
* vendor/*: Regenerate.

UPSTREAM: : prevent the kubecontrollermanager service-ca from getting less secure

UPSTREAM: : allow SCC to be disabled on a per-namespace basis

UPSTREAM: : verify required http2 cipher suites

In the apiserver admission, we need to return an error if the required http2 cipher suites are missing from a custom tlsSecurityProfile. Currently, custom cipher suites missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 result in an invalid http2 server configuration, causing the apiservers to crash. See go/x/net/http2.ConfigureServer for further information.

UPSTREAM: : drop the warning to use --keep-annotations

When a user runs the `oc debug` command for a pod with the management resource, we inform them that they should pass the `--keep-annotations` parameter to the debug command.

UPSTREAM: : admission/managementcpusoverride: cover the roll-back case

During the upgrade and roll-back flow 4.7->4.8->4.7, the topology-related fields under the infrastructure status can be empty because the old API does not support them. The code compares the empty infrastructure section with the current one. When the status has some other non-empty field and the topology fields are empty, we assume that the cluster is currently going through a roll-back and not a clean install.

UPSTREAM: : Remove pod warning annotation when workload partitioning is disabled

UPSTREAM: : use new access token inactivity timeout field.

UPSTREAM: : apirequestcount validation

UPSTREAM: : Added config node object validation for extreme latency profiles

UPSTREAM: : Add Upstream validation in the DNS admission check patches

UPSTREAM: : Make RestrictedEndpointsAdmission check NotReadyAddresses

UPSTREAM: : Make RestrictedEndpointsAdmission restrict EndpointSlices as well

Moved SkipSystemMasterAuthorizers to the authorizer.
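As a rough illustration of the ManagementCPUOverride opt-in conditions listed above (not the plugin's actual code; podOptsIn is a hypothetical helper, and only the two annotation checks are shown — the real plugin also verifies the node resource and the QoS class):

    package main

    import "fmt"

    // Annotation and resource names taken from the ManagementCPUOverride entry above.
    const (
        namespaceAllowedAnnotation = "workload.openshift.io/allowed"    // must be "management"
        podManagementAnnotation    = "workload.openshift.io/management" // e.g. {"effect": "PreferredDuringScheduling"}
        managementResource         = "management.workload.openshift.io/cores"
    )

    // podOptsIn mirrors only the first two conditions: the namespace allows
    // management workloads and the pod carries the workload annotation.
    func podOptsIn(nsAnnotations, podAnnotations map[string]string) bool {
        return nsAnnotations[namespaceAllowedAnnotation] == "management" &&
            podAnnotations[podManagementAnnotation] != ""
    }

    func main() {
        ns := map[string]string{namespaceAllowedAnnotation: "management"}
        pod := map[string]string{podManagementAnnotation: `{"effect": "PreferredDuringScheduling"}`}
        fmt.Println(podOptsIn(ns, pod), managementResource) // true management.workload.openshift.io/cores
    }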
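A minimal sketch of the check described in the "verify required http2 cipher suites" entry above, assuming the profile's ciphers are expressed as crypto/tls suite IDs (validateHTTP2Ciphers is a hypothetical helper, not the admission plugin's actual code): golang.org/x/net/http2 requires at least one of the two suites below.

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    // requiredHTTP2Ciphers are the suites http2.ConfigureServer expects to find
    // in a server's cipher list (at least one of them must be present).
    var requiredHTTP2Ciphers = []uint16{
        tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
        tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
    }

    // validateHTTP2Ciphers returns an error when none of the required HTTP/2
    // suites appears in the custom cipher list.
    func validateHTTP2Ciphers(custom []uint16) error {
        for _, want := range requiredHTTP2Ciphers {
            for _, got := range custom {
                if got == want {
                    return nil
                }
            }
        }
        return fmt.Errorf("custom tlsSecurityProfile must include at least one required http2 cipher suite")
    }

    func main() {
        // Missing both required suites: this configuration would break the http2 server setup.
        fmt.Println(validateHTTP2Ciphers([]uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}))
    }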
UPSTREAM: : Add validation plugin for CRD-based route parity.

UPSTREAM: : Add host assignment plugin for CRD-based routes.

UPSTREAM: : Apply shared defaulters to CRD-based routes.

Signed-off-by: Artyom Lukianov
Signed-off-by: Damien Grisonnet
Signed-off-by: Swarup Ghosh
OpenShift-Rebase-Source: 932411ee865
OpenShift-Rebase-Source: 1899555d4a7
OpenShift-Rebase-Source: 453583eb395
OpenShift-Rebase-Source: bf7e23e03e9

UPSTREAM: : STOR-829: Add CSIInlineVolumeSecurity admission plugin

The CSIInlineVolumeSecurity admission plugin inspects inline CSI volumes on pod creation and compares the security.openshift.io/csi-ephemeral-volume-profile label on the CSIDriver object to the pod security profile on the namespace.

OpenShift-Rebase-Source: a65c34b8f1a

UPSTREAM: : add icsp,idms,itms validation; reject creating ICSP when IDMS/ITMS exist

Reject ICSP when IDMS/ITMS resources exist. According to the discussion resolution https://docs.google.com/document/d/13h6IJn8wlzXdiPMvCWlMEHOXXqEZ9_GYOl02Wldb3z8/edit?usp=sharing, either the current ICSP or the new mirror-setting CRD should be rejected if a user tries to use both on the same cluster.

UPSTREAM: : node admission plugin for cpu partitioning

The ManagedNode admission plugin makes the Infrastructure.Status.CPUPartitioning field authoritative. It validates that nodes that wish to join the cluster are first configured to properly handle workload pinning. For more information, see https://github.com/openshift/enhancements/pull/1213

UPSTREAM: : kube-apiserver: allow injection of kube-apiserver options

UPSTREAM: : kube-apiserver: allow rewiring

OpenShift-Rebase-Source: 56b49c9c143
OpenShift-Rebase-Source: bcf574c65d1

UPSTREAM: : STOR-1270: Admission plugin to deny deletion of storages.operator.openshift.io

UPSTREAM: : support for both icsp and idms objects

Revert: https://github.com/openshift/kubernetes/pull/1310
Add support for ICSP and IDMS objects living at the same time.

UPSTREAM: : openshift-kube-apiserver: add openshift-kube-apiserver code

UPSTREAM: : featureset validation moved to CEL

UPSTREAM: : Add context to ObjectValidator

TODO: add router validation logic to implement the ctx addition in ObjectValidator

UPSTREAM: : loosen authentication.spec.type validation

UPSTREAM: : openshift-kube-apiserver: add kube-apiserver patches

pod .spec.nodeName should not override the project node selector in the podNodeEnvironment admission plugin

UPSTREAM: : Fix sets.String and sets.Set[string] type mismatch

library-go uses the generic Set while upstream still uses the deprecated sets.String in some parts of its code.
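For the sets.String / sets.Set[string] mismatch mentioned above, a small sketch of how the two apimachinery set types can be bridged (toGenericSet is a hypothetical helper name; the patch itself does the equivalent conversion inline in NewDefaultOffPluginsFunc):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/sets"
    )

    // toGenericSet converts the deprecated sets.String still used upstream into
    // the generic sets.Set[string] expected by newer library-go code.
    func toGenericSet(old sets.String) sets.Set[string] {
        return sets.New[string](old.UnsortedList()...)
    }

    func main() {
        old := sets.NewString("ResourceQuota", "quota.openshift.io/ClusterResourceQuota")
        fmt.Println(toGenericSet(old).Has("ResourceQuota")) // true
    }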
UPSTREAM: : Add RouteExternalCertificate validation in Route ObjectValidator UPSTREAM: : Fix incorrect type casting in admission validate_apiserver UPSTREAM: : react to library-go changes UPSTREAM: : Update RouteExternalCertificate validation in Route ObjectValidator --- .../admissionenablement/admission.go | 15 + .../admissionenablement/admission_config.go | 51 + .../admission/admissionenablement/register.go | 122 +++ .../admissionenablement/register_test.go | 55 ++ .../restrictusers/groupcache_test.go | 28 + .../restrictusers/intializers.go | 28 + .../restrictusers/restrictusers.go | 234 +++++ .../restrictusers/restrictusers_test.go | 404 ++++++++ .../restrictusers/subjectchecker.go | 312 ++++++ .../restrictusers/subjectchecker_test.go | 349 +++++++ .../restrictusers/usercache/groups.go | 55 ++ .../apis/clusterresourceoverride/doc.go | 4 + .../apis/clusterresourceoverride/name.go | 4 + .../apis/clusterresourceoverride/register.go | 23 + .../apis/clusterresourceoverride/types.go | 24 + .../apis/clusterresourceoverride/v1/doc.go | 5 + .../clusterresourceoverride/v1/register.go | 27 + .../clusterresourceoverride/v1/swagger_doc.go | 17 + .../apis/clusterresourceoverride/v1/types.go | 24 + .../validation/validation.go | 27 + .../autoscaling/apis/runonceduration/doc.go | 4 + .../apis/runonceduration/register.go | 34 + .../autoscaling/apis/runonceduration/types.go | 26 + .../apis/runonceduration/v1/conversion.go | 26 + .../apis/runonceduration/v1/doc.go | 5 + .../apis/runonceduration/v1/register.go | 29 + .../apis/runonceduration/v1/swagger_doc.go | 15 + .../apis/runonceduration/v1/types.go | 22 + .../runonceduration/validation/validation.go | 18 + .../validation/validation_test.go | 29 + .../clusterresourceoverride/admission.go | 348 +++++++ .../clusterresourceoverride/admission_test.go | 507 ++++++++++ .../clusterresourceoverride/doc.go | 8 + .../autoscaling/managednode/admission.go | 136 +++ .../autoscaling/managednode/admission_test.go | 128 +++ .../autoscaling/managednode/initializers.go | 28 + .../managementcpusoverride/admission.go | 639 +++++++++++++ .../managementcpusoverride/admission_test.go | 683 +++++++++++++ .../autoscaling/managementcpusoverride/doc.go | 16 + .../managementcpusoverride/initializers.go | 28 + .../autoscaling/runonceduration/admission.go | 148 +++ .../runonceduration/admission_test.go | 215 +++++ .../autoscaling/runonceduration/doc.go | 21 + .../validate_apirequestcount.go | 109 +++ .../validate_apirequestcount_test.go | 35 + .../apiserver/validate_apiserver.go | 259 +++++ .../apiserver/validate_apiserver_test.go | 286 ++++++ .../apiserver/validation_wrapper.go | 72 ++ .../customresourcevalidation/attributes.go | 59 ++ .../authentication/validate_authentication.go | 134 +++ .../validate_authentication_test.go | 179 ++++ .../clusterresourcequota/validate_crq.go | 84 ++ .../validation/validation.go | 68 ++ .../validation/validation_test.go | 173 ++++ .../deny_delete_cluster_config_resource.go | 54 ++ ...eny_delete_cluster_config_resource_test.go | 73 ++ .../console/validate_console.go | 119 +++ .../cr_validation_registration.go | 92 ++ .../customresourcevalidator.go | 101 ++ .../customresourcevalidator_test.go | 278 ++++++ .../dns/validate_dns.go | 242 +++++ .../dns/validate_dns_test.go | 899 ++++++++++++++++++ .../features/validate_features.go | 93 ++ .../customresourcevalidation/helpers.go | 40 + .../image/validate_image.go | 95 ++ .../validate_kubecontrollermanager.go | 114 +++ .../network/validate_network_config.go | 128 +++ 
...restrict_extreme_worker_latency_profile.go | 124 +++ ...ict_extreme_worker_latency_profile_test.go | 68 ++ .../customresourcevalidation/oauth/helpers.go | 32 + .../oauth/validate_github.go | 69 ++ .../oauth/validate_github_test.go | 249 +++++ .../oauth/validate_gitlab.go | 26 + .../oauth/validate_gitlab_test.go | 104 ++ .../oauth/validate_google.go | 23 + .../oauth/validate_google_test.go | 90 ++ .../oauth/validate_idp.go | 215 +++++ .../oauth/validate_idp_test.go | 429 +++++++++ .../oauth/validate_keystone.go | 23 + .../oauth/validate_keystone_test.go | 96 ++ .../oauth/validate_ldap.go | 66 ++ .../oauth/validate_ldap_test.go | 101 ++ .../oauth/validate_oauth.go | 111 +++ .../oauth/validate_openid.go | 54 ++ .../oauth/validate_openid_test.go | 125 +++ .../oauth/validate_requestheader.go | 85 ++ .../oauth/validate_requestheader_test.go | 193 ++++ .../deny_delete_cluster_operator_resource.go | 52 + ...y_delete_cluster_operator_resource_test.go | 73 ++ .../project/validate_project.go | 112 +++ .../rolebindingrestriction/validate_rbr.go | 84 ++ .../validation/validation.go | 115 +++ .../route/default_route.go | 65 ++ .../route/defaulters.go | 28 + .../route/defaulters_test.go | 66 ++ .../customresourcevalidation/route/doc.go | 4 + .../route/validate_route.go | 83 ++ .../route/validate_route_test.go | 149 +++ .../route/validation_opts.go | 31 + .../route/validation_wrapper.go | 92 ++ .../scheduler/validate_scheduler.go | 107 +++ .../defaulting_scc.go | 93 ++ .../defaulting_scc_test.go | 274 ++++++ .../securitycontextconstraints/defaults.go | 100 ++ .../validate_scc.go | 80 ++ .../validation/validation.go | 275 ++++++ .../validation/validation_test.go | 343 +++++++ .../namespaceconditions/decorator.go | 91 ++ .../namespaceconditions/labelcondition.go | 125 +++ .../labelcondition_test.go | 97 ++ .../namespaceconditions/namecondition.go | 60 ++ .../network/apis/externalipranger/doc.go | 4 + .../network/apis/externalipranger/register.go | 20 + .../network/apis/externalipranger/types.go | 20 + .../network/apis/externalipranger/v1/doc.go | 5 + .../apis/externalipranger/v1/register.go | 24 + .../network/apis/externalipranger/v1/types.go | 20 + .../network/apis/restrictedendpoints/doc.go | 4 + .../apis/restrictedendpoints/register.go | 20 + .../network/apis/restrictedendpoints/types.go | 15 + .../apis/restrictedendpoints/v1/doc.go | 5 + .../apis/restrictedendpoints/v1/register.go | 24 + .../apis/restrictedendpoints/v1/types.go | 15 + .../externalipranger/externalip_admission.go | 209 ++++ .../externalip_admission_test.go | 322 +++++++ .../restrictedendpoints/endpoint_admission.go | 292 ++++++ .../route/apis/hostassignment/doc.go | 4 + .../route/apis/hostassignment/register.go | 31 + .../route/apis/hostassignment/types.go | 17 + .../route/apis/hostassignment/v1/doc.go | 5 + .../route/apis/hostassignment/v1/register.go | 64 ++ .../route/apis/hostassignment/v1/types.go | 17 + .../route/apis/ingressadmission/doc.go | 4 + .../route/apis/ingressadmission/register.go | 33 + .../route/apis/ingressadmission/types.go | 22 + .../apis/ingressadmission/v1/defaults_test.go | 59 ++ .../route/apis/ingressadmission/v1/doc.go | 5 + .../apis/ingressadmission/v1/register.go | 27 + .../apis/ingressadmission/v1/swagger_doc.go | 15 + .../route/apis/ingressadmission/v1/types.go | 22 + .../route/hostassignment/admission.go | 167 ++++ .../admission/route/ingress_admission.go | 162 ++++ .../admission/route/ingress_admission_test.go | 171 ++++ .../scheduler/apis/podnodeconstraints/doc.go | 4 + 
.../apis/podnodeconstraints/register.go | 33 + .../apis/podnodeconstraints/types.go | 19 + .../apis/podnodeconstraints/v1/defaults.go | 19 + .../podnodeconstraints/v1/defaults_test.go | 59 ++ .../apis/podnodeconstraints/v1/doc.go | 5 + .../apis/podnodeconstraints/v1/register.go | 28 + .../apis/podnodeconstraints/v1/swagger_doc.go | 15 + .../apis/podnodeconstraints/v1/types.go | 20 + .../admission/scheduler/nodeenv/admission.go | 174 ++++ .../scheduler/nodeenv/admission_test.go | 211 ++++ .../scheduler/nodeenv/intializers.go | 28 + .../scheduler/podnodeconstraints/admission.go | 205 ++++ .../podnodeconstraints/admission_test.go | 283 ++++++ .../scheduler/podnodeconstraints/doc.go | 44 + .../csiinlinevolumesecurity/admission.go | 281 ++++++ .../csiinlinevolumesecurity/admission_test.go | 508 ++++++++++ .../storage/csiinlinevolumesecurity/doc.go | 7 + .../authorization/browsersafe/authorizer.go | 107 +++ .../browsersafe/authorizer_test.go | 80 ++ .../scopeauthorizer/authorizer.go | 49 + .../scopeauthorizer/authorizer_test.go | 150 +++ .../configdefault/kubecontrolplane_default.go | 115 +++ .../configdefault/kubecontrolplane_refs.go | 122 +++ .../enablement/enablement.go | 71 ++ .../enablement/intialization.go | 85 ++ .../openshiftkubeapiserver/flags.go | 112 +++ .../openshiftkubeapiserver/flags_test.go | 26 + .../openshiftkubeapiserver/patch.go | 181 ++++ .../patch_handlerchain.go | 97 ++ .../openshiftkubeapiserver/sdn_readyz_wait.go | 146 +++ .../openshiftkubeapiserver/wellknown_oauth.go | 57 ++ 175 files changed, 18754 insertions(+) create mode 100644 openshift-kube-apiserver/admission/admissionenablement/admission.go create mode 100644 openshift-kube-apiserver/admission/admissionenablement/admission_config.go create mode 100644 openshift-kube-apiserver/admission/admissionenablement/register.go create mode 100644 openshift-kube-apiserver/admission/admissionenablement/register_test.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go create mode 100644 
openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/managednode/admission.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/managednode/admission_test.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/managednode/initializers.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission_test.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/initializers.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/attributes.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go create mode 100644 
openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/helpers.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/network/validate_network_config.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go 
create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/route/default_route.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/route/doc.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/route/validation_opts.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/route/validation_wrapper.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go create mode 100644 openshift-kube-apiserver/admission/namespaceconditions/decorator.go create mode 100644 openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go create mode 100644 openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go create mode 100644 openshift-kube-apiserver/admission/namespaceconditions/namecondition.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/register.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/types.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go create mode 100644 
openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go create mode 100644 openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go create mode 100644 openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go create mode 100644 openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go create mode 100644 openshift-kube-apiserver/admission/route/apis/hostassignment/doc.go create mode 100644 openshift-kube-apiserver/admission/route/apis/hostassignment/register.go create mode 100644 openshift-kube-apiserver/admission/route/apis/hostassignment/types.go create mode 100644 openshift-kube-apiserver/admission/route/apis/hostassignment/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/route/apis/hostassignment/v1/register.go create mode 100644 openshift-kube-apiserver/admission/route/apis/hostassignment/v1/types.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go create mode 100644 openshift-kube-apiserver/admission/route/hostassignment/admission.go create mode 100644 openshift-kube-apiserver/admission/route/ingress_admission.go create mode 100644 openshift-kube-apiserver/admission/route/ingress_admission_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go create mode 100644 openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go create mode 100644 
openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go create mode 100644 openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go create mode 100644 openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go create mode 100644 openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission.go create mode 100644 openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission_test.go create mode 100644 openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/doc.go create mode 100644 openshift-kube-apiserver/authorization/browsersafe/authorizer.go create mode 100644 openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go create mode 100644 openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go create mode 100644 openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go create mode 100644 openshift-kube-apiserver/configdefault/kubecontrolplane_default.go create mode 100644 openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go create mode 100644 openshift-kube-apiserver/enablement/enablement.go create mode 100644 openshift-kube-apiserver/enablement/intialization.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/flags.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/patch.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go diff --git a/openshift-kube-apiserver/admission/admissionenablement/admission.go b/openshift-kube-apiserver/admission/admissionenablement/admission.go new file mode 100644 index 0000000000000..a701f6d285cae --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/admission.go @@ -0,0 +1,15 @@ +package admissionenablement + +import ( + "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration" +) + +func InstallOpenShiftAdmissionPlugins(o *options.ServerRunOptions) { + existingAdmissionOrder := o.Admission.GenericAdmission.RecommendedPluginOrder + o.Admission.GenericAdmission.RecommendedPluginOrder = NewOrderedKubeAdmissionPlugins(existingAdmissionOrder) + RegisterOpenshiftKubeAdmissionPlugins(o.Admission.GenericAdmission.Plugins) + customresourcevalidationregistration.RegisterCustomResourceValidation(o.Admission.GenericAdmission.Plugins) + existingDefaultOff := o.Admission.GenericAdmission.DefaultOffPlugins + o.Admission.GenericAdmission.DefaultOffPlugins = NewDefaultOffPluginsFunc(existingDefaultOff)() +} diff --git a/openshift-kube-apiserver/admission/admissionenablement/admission_config.go b/openshift-kube-apiserver/admission/admissionenablement/admission_config.go new file mode 100644 index 0000000000000..dedb9eddbc00f --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/admission_config.go @@ -0,0 +1,51 @@ +package admissionenablement + +import ( + "time" + + "github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout" + "k8s.io/apimachinery/pkg/labels" + 
"k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/namespaceconditions" + controlplaneapiserver "k8s.io/kubernetes/pkg/controlplane/apiserver/options" +) + +const disableSCCLevelLabel = "security.openshift.io/disable-securitycontextconstraints" + +var enforceSCCSelector labels.Selector + +func init() { + var err error + enforceSCCSelector, err = labels.Parse(disableSCCLevelLabel + " != true") + if err != nil { + panic(err) + } +} + +func SetAdmissionDefaults(o *controlplaneapiserver.CompletedOptions, informers informers.SharedInformerFactory, kubeClient kubernetes.Interface) { + // set up the decorators we need. This is done late and out of order because our decorators currently require informers which are not + // present until we start running + namespaceLabelDecorator := namespaceconditions.NamespaceLabelConditions{ + NamespaceClient: kubeClient.CoreV1(), + NamespaceLister: informers.Core().V1().Namespaces().Lister(), + + SkipLevelZeroNames: SkipRunLevelZeroPlugins, + SkipLevelOneNames: SkipRunLevelOnePlugins, + } + sccLabelDecorator := namespaceconditions.NewConditionalAdmissionPlugins( + kubeClient.CoreV1(), informers.Core().V1().Namespaces().Lister(), enforceSCCSelector, + "security.openshift.io/SecurityContextConstraint", "security.openshift.io/SCCExecRestrictions") + + o.Admission.GenericAdmission.Decorators = append(o.Admission.GenericAdmission.Decorators, + admission.Decorators{ + // SCC can be skipped by setting a namespace label `security.openshift.io/disable-securitycontextconstraints = true` + // This is useful for disabling SCC and using PodSecurity admission instead. + admission.DecoratorFunc(sccLabelDecorator.WithNamespaceLabelSelector), + + admission.DecoratorFunc(namespaceLabelDecorator.WithNamespaceLabelConditions), + admission.DecoratorFunc(admissiontimeout.AdmissionTimeout{Timeout: 13 * time.Second}.WithTimeout), + }, + ) +} diff --git a/openshift-kube-apiserver/admission/admissionenablement/register.go b/openshift-kube-apiserver/admission/admissionenablement/register.go new file mode 100644 index 0000000000000..e652a88010f77 --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/register.go @@ -0,0 +1,122 @@ +package admissionenablement + +import ( + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/resourcequota" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy" + imagepolicyapiv1 "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1" + quotaclusterresourcequota "github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission" + authorizationrestrictusers "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers" + quotaclusterresourceoverride "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managednode" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride" + quotarunonceduration "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/runonceduration" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration" + 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/network/externalipranger" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/restrictedendpoints" + ingressadmission "k8s.io/kubernetes/openshift-kube-apiserver/admission/route" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/hostassignment" + projectnodeenv "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv" + schedulerpodnodeconstraints "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/podnodeconstraints" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity" +) + +func RegisterOpenshiftKubeAdmissionPlugins(plugins *admission.Plugins) { + authorizationrestrictusers.Register(plugins) + hostassignment.Register(plugins) + imagepolicy.Register(plugins) + ingressadmission.Register(plugins) + managementcpusoverride.Register(plugins) + managednode.Register(plugins) + projectnodeenv.Register(plugins) + quotaclusterresourceoverride.Register(plugins) + quotaclusterresourcequota.Register(plugins) + quotarunonceduration.Register(plugins) + schedulerpodnodeconstraints.Register(plugins) + sccadmission.Register(plugins) + sccadmission.RegisterSCCExecRestrictions(plugins) + externalipranger.RegisterExternalIP(plugins) + restrictedendpoints.RegisterRestrictedEndpoints(plugins) + csiinlinevolumesecurity.Register(plugins) +} + +var ( + + // these are admission plugins that cannot be applied until after the kubeapiserver starts. + // TODO if nothing comes to mind in 3.10, kill this + SkipRunLevelZeroPlugins = sets.NewString() + // these are admission plugins that cannot be applied until after the openshiftapiserver apiserver starts. + SkipRunLevelOnePlugins = sets.NewString( + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "quota.openshift.io/ClusterResourceQuota", + "security.openshift.io/SecurityContextConstraint", + "security.openshift.io/SCCExecRestrictions", + ) + + // openshiftAdmissionPluginsForKubeBeforeMutating are the admission plugins to add after kube admission, before mutating webhooks + openshiftAdmissionPluginsForKubeBeforeMutating = []string{ + "autoscaling.openshift.io/ClusterResourceOverride", + managementcpusoverride.PluginName, // "autoscaling.openshift.io/ManagementCPUsOverride" + "authorization.openshift.io/RestrictSubjectBindings", + "autoscaling.openshift.io/RunOnceDuration", + "scheduling.openshift.io/PodNodeConstraints", + "scheduling.openshift.io/OriginPodNodeEnvironment", + "network.openshift.io/ExternalIPRanger", + "network.openshift.io/RestrictedEndpointsAdmission", + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "security.openshift.io/SecurityContextConstraint", + "security.openshift.io/SCCExecRestrictions", + "route.openshift.io/IngressAdmission", + hostassignment.PluginName, // "route.openshift.io/RouteHostAssignment" + csiinlinevolumesecurity.PluginName, // "storage.openshift.io/CSIInlineVolumeSecurity" + managednode.PluginName, // "autoscaling.openshift.io/ManagedNode" + } + + // openshiftAdmissionPluginsForKubeAfterResourceQuota are the plugins to add after ResourceQuota plugin + openshiftAdmissionPluginsForKubeAfterResourceQuota = []string{ + "quota.openshift.io/ClusterResourceQuota", + } + + // additionalDefaultOnPlugins is a list of plugins we turn on by default that core kube does not. 
+ additionalDefaultOnPlugins = sets.NewString( + "NodeRestriction", + "OwnerReferencesPermissionEnforcement", + "PodNodeSelector", + "PodTolerationRestriction", + "Priority", + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "StorageObjectInUseProtection", + ) +) + +func NewOrderedKubeAdmissionPlugins(kubeAdmissionOrder []string) []string { + ret := []string{} + for _, curr := range kubeAdmissionOrder { + if curr == mutatingwebhook.PluginName { + ret = append(ret, openshiftAdmissionPluginsForKubeBeforeMutating...) + ret = append(ret, customresourcevalidationregistration.AllCustomResourceValidators...) + } + + ret = append(ret, curr) + + if curr == resourcequota.PluginName { + ret = append(ret, openshiftAdmissionPluginsForKubeAfterResourceQuota...) + } + } + return ret +} + +func NewDefaultOffPluginsFunc(kubeDefaultOffAdmission sets.Set[string]) func() sets.Set[string] { + return func() sets.Set[string] { + kubeOff := sets.New[string](kubeDefaultOffAdmission.UnsortedList()...) + kubeOff.Delete(additionalDefaultOnPlugins.List()...) + kubeOff.Delete(openshiftAdmissionPluginsForKubeBeforeMutating...) + kubeOff.Delete(openshiftAdmissionPluginsForKubeAfterResourceQuota...) + kubeOff.Delete(customresourcevalidationregistration.AllCustomResourceValidators...) + return kubeOff + } +} diff --git a/openshift-kube-apiserver/admission/admissionenablement/register_test.go b/openshift-kube-apiserver/admission/admissionenablement/register_test.go new file mode 100644 index 0000000000000..3c24cfa9e11e9 --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/register_test.go @@ -0,0 +1,55 @@ +package admissionenablement + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/kubernetes/pkg/kubeapiserver/options" + + "github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration" +) + +func TestAdmissionRegistration(t *testing.T) { + orderedAdmissionChain := NewOrderedKubeAdmissionPlugins(options.AllOrderedPlugins) + defaultOffPlugins := NewDefaultOffPluginsFunc(options.DefaultOffAdmissionPlugins())() + registerAllAdmissionPlugins := func(plugins *admission.Plugins) { + genericapiserver.RegisterAllAdmissionPlugins(plugins) + options.RegisterAllAdmissionPlugins(plugins) + RegisterOpenshiftKubeAdmissionPlugins(plugins) + customresourcevalidationregistration.RegisterCustomResourceValidation(plugins) + } + plugins := admission.NewPlugins() + registerAllAdmissionPlugins(plugins) + + err := admissionregistrationtesting.AdmissionRegistrationTest(plugins, orderedAdmissionChain, sets.Set[string](defaultOffPlugins)) + if err != nil { + t.Fatal(err) + } +} + +// TestResourceQuotaBeforeClusterResourceQuota simply test wheather ResourceQuota plugin is before ClusterResourceQuota plugin +func TestResourceQuotaBeforeClusterResourceQuota(t *testing.T) { + orderedAdmissionChain := NewOrderedKubeAdmissionPlugins(options.AllOrderedPlugins) + + expectedOrderedAdmissionSubChain := []string{"ResourceQuota", "quota.openshift.io/ClusterResourceQuota", "AlwaysDeny"} + actualOrderedAdmissionChain := extractSubChain(orderedAdmissionChain, expectedOrderedAdmissionSubChain[0]) + + if !reflect.DeepEqual(actualOrderedAdmissionChain, expectedOrderedAdmissionSubChain) { + t.Fatalf("expected %v, got %v ", expectedOrderedAdmissionSubChain, 
actualOrderedAdmissionChain) + } +} + +func extractSubChain(admissionChain []string, takeFrom string) []string { + indexOfTake := 0 + for index, admission := range admissionChain { + if admission == takeFrom { + indexOfTake = index + break + } + } + return admissionChain[indexOfTake:] +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go new file mode 100644 index 0000000000000..1dde83cbce2a2 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go @@ -0,0 +1,28 @@ +package restrictusers + +import ( + userv1 "github.com/openshift/api/user/v1" +) + +type fakeGroupCache struct { + groups []userv1.Group +} + +func (g fakeGroupCache) GroupsFor(user string) ([]*userv1.Group, error) { + ret := []*userv1.Group{} + for i := range g.groups { + group := &g.groups[i] + for _, currUser := range group.Users { + if user == currUser { + ret = append(ret, group) + break + } + } + + } + return ret, nil +} + +func (g fakeGroupCache) HasSynced() bool { + return true +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go b/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go new file mode 100644 index 0000000000000..d3fdcde4a5113 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go @@ -0,0 +1,28 @@ +package restrictusers + +import ( + "k8s.io/apiserver/pkg/admission" + + userinformer "github.com/openshift/client-go/user/informers/externalversions" +) + +func NewInitializer(userInformer userinformer.SharedInformerFactory) admission.PluginInitializer { + return &localInitializer{userInformer: userInformer} +} + +type WantsUserInformer interface { + SetUserInformer(userinformer.SharedInformerFactory) + admission.InitializationValidator +} + +type localInitializer struct { + userInformer userinformer.SharedInformerFactory +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsUserInformer); ok { + wants.SetUserInformer(i.userInformer) + } +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go new file mode 100644 index 0000000000000..4c78858203181 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go @@ -0,0 +1,234 @@ +package restrictusers + +import ( + "context" + "errors" + "fmt" + "io" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/apis/rbac" + + userv1 "github.com/openshift/api/user/v1" + authorizationtypedclient "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + userclient "github.com/openshift/client-go/user/clientset/versioned" + userinformer "github.com/openshift/client-go/user/informers/externalversions" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers/usercache" +) + +func Register(plugins *admission.Plugins) { + plugins.Register("authorization.openshift.io/RestrictSubjectBindings", + func(config io.Reader) (admission.Interface, error) { + return NewRestrictUsersAdmission() + }) +} + +type GroupCache interface { + GroupsFor(string) ([]*userv1.Group, error) + HasSynced() bool +} + +// restrictUsersAdmission implements admission.ValidateInterface and enforces +// restrictions on adding rolebindings in a project to permit only designated +// subjects. +type restrictUsersAdmission struct { + *admission.Handler + + roleBindingRestrictionsGetter authorizationtypedclient.RoleBindingRestrictionsGetter + userClient userclient.Interface + kubeClient kubernetes.Interface + groupCache GroupCache +} + +var _ = admissionrestconfig.WantsRESTClientConfig(&restrictUsersAdmission{}) +var _ = WantsUserInformer(&restrictUsersAdmission{}) +var _ = initializer.WantsExternalKubeClientSet(&restrictUsersAdmission{}) +var _ = admission.ValidationInterface(&restrictUsersAdmission{}) + +// NewRestrictUsersAdmission configures an admission plugin that enforces +// restrictions on adding role bindings in a project. +func NewRestrictUsersAdmission() (admission.Interface, error) { + return &restrictUsersAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + }, nil +} + +func (q *restrictUsersAdmission) SetExternalKubeClientSet(c kubernetes.Interface) { + q.kubeClient = c +} + +func (q *restrictUsersAdmission) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + + // RoleBindingRestriction is served using CRD resource any status update must use JSON + jsonClientConfig := rest.CopyConfig(&restClientConfig) + jsonClientConfig.ContentConfig.AcceptContentTypes = "application/json" + jsonClientConfig.ContentConfig.ContentType = "application/json" + + q.roleBindingRestrictionsGetter, err = authorizationtypedclient.NewForConfig(jsonClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } + + q.userClient, err = userclient.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } +} + +func (q *restrictUsersAdmission) SetUserInformer(userInformers userinformer.SharedInformerFactory) { + q.groupCache = usercache.NewGroupCache(userInformers.User().V1().Groups()) +} + +// subjectsDelta returns the relative complement of elementsToIgnore in +// elements (i.e., elements∖elementsToIgnore). +func subjectsDelta(elementsToIgnore, elements []rbac.Subject) []rbac.Subject { + result := []rbac.Subject{} + + for _, el := range elements { + keep := true + for _, skipEl := range elementsToIgnore { + if el == skipEl { + keep = false + break + } + } + if keep { + result = append(result, el) + } + } + + return result +} + +// Admit makes admission decisions that enforce restrictions on adding +// project-scoped role-bindings. In order for a role binding to be permitted, +// each subject in the binding must be matched by some rolebinding restriction +// in the namespace. +func (q *restrictUsersAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + + // We only care about rolebindings + if a.GetResource().GroupResource() != rbac.Resource("rolebindings") { + return nil + } + + // Ignore all operations that correspond to subresource actions. + if len(a.GetSubresource()) != 0 { + return nil + } + + ns := a.GetNamespace() + // Ignore cluster-level resources. 
+ if len(ns) == 0 { + return nil + } + + var oldSubjects []rbac.Subject + + obj, oldObj := a.GetObject(), a.GetOldObject() + + rolebinding, ok := obj.(*rbac.RoleBinding) + if !ok { + return admission.NewForbidden(a, + fmt.Errorf("wrong object type for new rolebinding: %T", obj)) + } + + if len(rolebinding.Subjects) == 0 { + klog.V(4).Infof("No new subjects; admitting") + return nil + } + + if oldObj != nil { + oldrolebinding, ok := oldObj.(*rbac.RoleBinding) + if !ok { + return admission.NewForbidden(a, + fmt.Errorf("wrong object type for old rolebinding: %T", oldObj)) + } + oldSubjects = oldrolebinding.Subjects + } + + klog.V(4).Infof("Handling rolebinding %s/%s", + rolebinding.Namespace, rolebinding.Name) + + newSubjects := subjectsDelta(oldSubjects, rolebinding.Subjects) + if len(newSubjects) == 0 { + klog.V(4).Infof("No new subjects; admitting") + return nil + } + + // RoleBindingRestrictions admission plugin is DefaultAllow, hence RBRs can't use an informer, + // because it's impossible to know if cache is up-to-date + roleBindingRestrictionList, err := q.roleBindingRestrictionsGetter.RoleBindingRestrictions(ns). + List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not list rolebinding restrictions: %v", err)) + } + if len(roleBindingRestrictionList.Items) == 0 { + klog.V(4).Infof("No rolebinding restrictions specified; admitting") + return nil + } + + checkers := []SubjectChecker{} + for _, rbr := range roleBindingRestrictionList.Items { + checker, err := NewSubjectChecker(&rbr.Spec) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not create rolebinding restriction subject checker: %v", err)) + } + checkers = append(checkers, checker) + } + + roleBindingRestrictionContext, err := newRoleBindingRestrictionContext(ns, + q.kubeClient, q.userClient.UserV1(), q.groupCache) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not create rolebinding restriction context: %v", err)) + } + + checker := NewUnionSubjectChecker(checkers) + + errs := []error{} + for _, subject := range newSubjects { + allowed, err := checker.Allowed(subject, roleBindingRestrictionContext) + if err != nil { + errs = append(errs, err) + } + if !allowed { + errs = append(errs, + fmt.Errorf("rolebindings to %s %q are not allowed in project %q", + subject.Kind, subject.Name, ns)) + } + } + if len(errs) != 0 { + return admission.NewForbidden(a, kerrors.NewAggregate(errs)) + } + + klog.V(4).Infof("All new subjects are allowed; admitting") + + return nil +} + +func (q *restrictUsersAdmission) ValidateInitialization() error { + if q.kubeClient == nil { + return errors.New("RestrictUsersAdmission plugin requires a Kubernetes client") + } + if q.roleBindingRestrictionsGetter == nil { + return errors.New("RestrictUsersAdmission plugin requires an OpenShift client") + } + if q.userClient == nil { + return errors.New("RestrictUsersAdmission plugin requires an OpenShift user client") + } + if q.groupCache == nil { + return errors.New("RestrictUsersAdmission plugin requires a group cache") + } + + return nil +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go new file mode 100644 index 0000000000000..50dd6eb5faea9 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go @@ -0,0 +1,404 @@ +package restrictusers + +import ( + "context" + "fmt" + 
"strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + fakeauthorizationclient "github.com/openshift/client-go/authorization/clientset/versioned/fake" + fakeuserclient "github.com/openshift/client-go/user/clientset/versioned/fake" +) + +func TestAdmission(t *testing.T) { + var ( + userAlice = userv1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Alice", + Labels: map[string]string{"foo": "bar"}, + }, + } + userAliceSubj = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Alice", + } + + userBob = userv1.User{ + ObjectMeta: metav1.ObjectMeta{Name: "Bob"}, + Groups: []string{"group"}, + } + userBobSubj = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Bob", + } + + group = userv1.Group{ + ObjectMeta: metav1.ObjectMeta{ + Name: "group", + Labels: map[string]string{"baz": "quux"}, + }, + Users: []string{userBobSubj.Name}, + } + groupSubj = rbac.Subject{ + Kind: rbac.GroupKind, + Name: "group", + } + + serviceaccount = corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "serviceaccount", + Labels: map[string]string{"xyzzy": "thud"}, + }, + } + serviceaccountSubj = rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "namespace", + Name: "serviceaccount", + } + ) + + testCases := []struct { + name string + expectedErr string + + object runtime.Object + oldObject runtime.Object + kind schema.GroupVersionKind + resource schema.GroupVersionResource + namespace string + subresource string + kubeObjects []runtime.Object + authorizationObjects []runtime.Object + userObjects []runtime.Object + }{ + { + name: "ignore (allow) if subresource is nonempty", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{userAliceSubj}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "subresource", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "ignore (allow) cluster-scoped rolebinding", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{userAliceSubj}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "allow if the namespace has no rolebinding restrictions", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + 
userBobSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "allow if any rolebinding with the subject already exists", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bogus-matcher", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{}, + }, + }, + }, + }, + { + name: "allow a user, group, or service account in a rolebinding if a literal matches", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + serviceaccountSubj, + groupSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-users", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userAlice.Name}, + }, + }, + }, + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-groups", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Groups: []string{group.Name}, + }, + }, + }, + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-serviceaccounts", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccount.Name, + Namespace: 
serviceaccount.Namespace, + }, + }, + }, + }, + }, + }, + }, + { + name: "prohibit user without a matching user literal", + expectedErr: fmt.Sprintf("rolebindings to %s %q are not allowed", + userAliceSubj.Kind, userAliceSubj.Name), + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-users-bob", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userBobSubj.Name}, + }, + }, + }, + }, + userObjects: []runtime.Object{ + &userAlice, + &userBob, + }, + }, + } + + stopCh := make(chan struct{}) + defer close(stopCh) + + for _, tc := range testCases { + kclientset := fake.NewSimpleClientset(tc.kubeObjects...) + fakeUserClient := fakeuserclient.NewSimpleClientset(tc.userObjects...) + fakeAuthorizationClient := fakeauthorizationclient.NewSimpleClientset(tc.authorizationObjects...) + + plugin, err := NewRestrictUsersAdmission() + if err != nil { + t.Errorf("unexpected error initializing admission plugin: %v", err) + } + + plugin.(*restrictUsersAdmission).kubeClient = kclientset + plugin.(*restrictUsersAdmission).roleBindingRestrictionsGetter = fakeAuthorizationClient.AuthorizationV1() + plugin.(*restrictUsersAdmission).userClient = fakeUserClient + plugin.(*restrictUsersAdmission).groupCache = fakeGroupCache{} + + err = admission.ValidateInitialization(plugin) + if err != nil { + t.Errorf("unexpected error validating admission plugin: %v", err) + } + + attributes := admission.NewAttributesRecord( + tc.object, + tc.oldObject, + tc.kind, + tc.namespace, + tc.name, + tc.resource, + tc.subresource, + admission.Create, + nil, + false, + &user.DefaultInfo{}, + ) + + err = plugin.(admission.ValidationInterface).Validate(context.TODO(), attributes, nil) + switch { + case len(tc.expectedErr) == 0 && err == nil: + case len(tc.expectedErr) == 0 && err != nil: + t.Errorf("%s: unexpected error: %v", tc.name, err) + case len(tc.expectedErr) != 0 && err == nil: + t.Errorf("%s: missing error: %v", tc.name, tc.expectedErr) + case len(tc.expectedErr) != 0 && err != nil && + !strings.Contains(err.Error(), tc.expectedErr): + t.Errorf("%s: missing error: expected %v, got %v", + tc.name, tc.expectedErr, err) + } + } +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go new file mode 100644 index 0000000000000..2e10e182b9de9 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go @@ -0,0 +1,312 @@ +package restrictusers + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + kerrors "k8s.io/apimachinery/pkg/util/errors" + 
"k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + userclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" +) + +// SubjectChecker determines whether rolebindings on a subject (user, group, or +// service account) are allowed in a project. +type SubjectChecker interface { + Allowed(rbac.Subject, *RoleBindingRestrictionContext) (bool, error) +} + +// UnionSubjectChecker represents the union of zero or more SubjectCheckers. +type UnionSubjectChecker []SubjectChecker + +// NewUnionSubjectChecker returns a new UnionSubjectChecker. +func NewUnionSubjectChecker(checkers []SubjectChecker) UnionSubjectChecker { + return UnionSubjectChecker(checkers) +} + +// Allowed determines whether the given subject is allowed in rolebindings in +// the project. +func (checkers UnionSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + errs := []error{} + for _, checker := range []SubjectChecker(checkers) { + allowed, err := checker.Allowed(subject, ctx) + if err != nil { + errs = append(errs, err) + } else if allowed { + return true, nil + } + } + + return false, kerrors.NewAggregate(errs) +} + +// RoleBindingRestrictionContext holds context that is used when determining +// whether a RoleBindingRestriction allows rolebindings on a particular subject. +type RoleBindingRestrictionContext struct { + userClient userclient.UserV1Interface + kclient kubernetes.Interface + + // groupCache maps user name to groups. + groupCache GroupCache + + // userToLabels maps user name to labels.Set. + userToLabelSet map[string]labels.Set + + // groupToLabels maps group name to labels.Set. + groupToLabelSet map[string]labels.Set + + // namespace is the namespace for which the RoleBindingRestriction makes + // determinations. + namespace string +} + +// NewRoleBindingRestrictionContext returns a new RoleBindingRestrictionContext +// object. +func newRoleBindingRestrictionContext(ns string, kc kubernetes.Interface, userClient userclient.UserV1Interface, groupCache GroupCache) (*RoleBindingRestrictionContext, error) { + return &RoleBindingRestrictionContext{ + namespace: ns, + kclient: kc, + userClient: userClient, + groupCache: groupCache, + userToLabelSet: map[string]labels.Set{}, + groupToLabelSet: map[string]labels.Set{}, + }, nil +} + +// labelSetForUser returns the label set for the given user subject. +func (ctx *RoleBindingRestrictionContext) labelSetForUser(subject rbac.Subject) (labels.Set, error) { + if subject.Kind != rbac.UserKind { + return labels.Set{}, fmt.Errorf("not a user: %q", subject.Name) + } + + labelSet, ok := ctx.userToLabelSet[subject.Name] + if ok { + return labelSet, nil + } + + user, err := ctx.userClient.Users().Get(context.TODO(), subject.Name, metav1.GetOptions{}) + if err != nil { + return labels.Set{}, err + } + + ctx.userToLabelSet[subject.Name] = labels.Set(user.Labels) + + return ctx.userToLabelSet[subject.Name], nil +} + +// groupsForUser returns the groups for the given user subject. 
+func (ctx *RoleBindingRestrictionContext) groupsForUser(subject rbac.Subject) ([]*userv1.Group, error) { + if subject.Kind != rbac.UserKind { + return []*userv1.Group{}, fmt.Errorf("not a user: %q", subject.Name) + } + + err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + return ctx.groupCache.HasSynced(), nil + }) + if err != nil { + return nil, fmt.Errorf("groups.user.openshift.io cache is not synchronized") + } + + return ctx.groupCache.GroupsFor(subject.Name) +} + +// labelSetForGroup returns the label set for the given group subject. +func (ctx *RoleBindingRestrictionContext) labelSetForGroup(subject rbac.Subject) (labels.Set, error) { + if subject.Kind != rbac.GroupKind { + return labels.Set{}, fmt.Errorf("not a group: %q", subject.Name) + } + + labelSet, ok := ctx.groupToLabelSet[subject.Name] + if ok { + return labelSet, nil + } + + group, err := ctx.userClient.Groups().Get(context.TODO(), subject.Name, metav1.GetOptions{}) + if err != nil { + return labels.Set{}, err + } + + ctx.groupToLabelSet[subject.Name] = labels.Set(group.Labels) + + return ctx.groupToLabelSet[subject.Name], nil +} + +// UserSubjectChecker determines whether a user subject is allowed in +// rolebindings in the project. +type UserSubjectChecker struct { + userRestriction *authorizationv1.UserRestriction +} + +// NewUserSubjectChecker returns a new UserSubjectChecker. +func NewUserSubjectChecker(userRestriction *authorizationv1.UserRestriction) UserSubjectChecker { + return UserSubjectChecker{userRestriction: userRestriction} +} + +// Allowed determines whether the given user subject is allowed in rolebindings +// in the project. +func (checker UserSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.UserKind { + return false, nil + } + + for _, userName := range checker.userRestriction.Users { + if subject.Name == userName { + return true, nil + } + } + + if len(checker.userRestriction.Groups) != 0 { + subjectGroups, err := ctx.groupsForUser(subject) + if err != nil { + return false, err + } + + for _, groupName := range checker.userRestriction.Groups { + for _, group := range subjectGroups { + if group.Name == groupName { + return true, nil + } + } + } + } + + if len(checker.userRestriction.Selectors) != 0 { + labelSet, err := ctx.labelSetForUser(subject) + if err != nil { + return false, err + } + + for _, labelSelector := range checker.userRestriction.Selectors { + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return false, err + } + + if selector.Matches(labelSet) { + return true, nil + } + } + } + + return false, nil +} + +// GroupSubjectChecker determines whether a group subject is allowed in +// rolebindings in the project. +type GroupSubjectChecker struct { + groupRestriction *authorizationv1.GroupRestriction +} + +// NewGroupSubjectChecker returns a new GroupSubjectChecker. +func NewGroupSubjectChecker(groupRestriction *authorizationv1.GroupRestriction) GroupSubjectChecker { + return GroupSubjectChecker{groupRestriction: groupRestriction} +} + +// Allowed determines whether the given group subject is allowed in rolebindings +// in the project. 
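+// A group matches if its name is listed literally in the restriction or if any of the restriction's label selectors match the group's labels.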
+func (checker GroupSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.GroupKind { + return false, nil + } + + for _, groupName := range checker.groupRestriction.Groups { + if subject.Name == groupName { + return true, nil + } + } + + if len(checker.groupRestriction.Selectors) != 0 { + labelSet, err := ctx.labelSetForGroup(subject) + if err != nil { + return false, err + } + + for _, labelSelector := range checker.groupRestriction.Selectors { + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return false, err + } + + if selector.Matches(labelSet) { + return true, nil + } + } + } + + return false, nil +} + +// ServiceAccountSubjectChecker determines whether a serviceaccount subject is +// allowed in rolebindings in the project. +type ServiceAccountSubjectChecker struct { + serviceAccountRestriction *authorizationv1.ServiceAccountRestriction +} + +// NewServiceAccountSubjectChecker returns a new ServiceAccountSubjectChecker. +func NewServiceAccountSubjectChecker(serviceAccountRestriction *authorizationv1.ServiceAccountRestriction) ServiceAccountSubjectChecker { + return ServiceAccountSubjectChecker{ + serviceAccountRestriction: serviceAccountRestriction, + } +} + +// Allowed determines whether the given serviceaccount subject is allowed in +// rolebindings in the project. +func (checker ServiceAccountSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.ServiceAccountKind { + return false, nil + } + + subjectNamespace := subject.Namespace + if len(subjectNamespace) == 0 { + // If a RoleBinding has a subject that is a ServiceAccount with + // no namespace specified, the namespace will be defaulted to + // that of the RoleBinding. However, admission control plug-ins + // execute before this happens, so in order not to reject such + // subjects erroneously, we copy the logic here of using the + // RoleBinding's namespace if the subject's is empty. + subjectNamespace = ctx.namespace + } + + for _, namespace := range checker.serviceAccountRestriction.Namespaces { + if subjectNamespace == namespace { + return true, nil + } + } + + for _, serviceAccountRef := range checker.serviceAccountRestriction.ServiceAccounts { + serviceAccountNamespace := serviceAccountRef.Namespace + if len(serviceAccountNamespace) == 0 { + serviceAccountNamespace = ctx.namespace + } + + if subject.Name == serviceAccountRef.Name && + subjectNamespace == serviceAccountNamespace { + return true, nil + } + } + + return false, nil +} + +// NewSubjectChecker returns a new SubjectChecker. 
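+// The concrete checker is chosen from whichever restriction field of the spec is set; a spec with no restriction set is rejected as invalid.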
+func NewSubjectChecker(spec *authorizationv1.RoleBindingRestrictionSpec) (SubjectChecker, error) { + switch { + case spec.UserRestriction != nil: + return NewUserSubjectChecker(spec.UserRestriction), nil + + case spec.GroupRestriction != nil: + return NewGroupSubjectChecker(spec.GroupRestriction), nil + + case spec.ServiceAccountRestriction != nil: + return NewServiceAccountSubjectChecker(spec.ServiceAccountRestriction), nil + } + + return nil, fmt.Errorf("invalid RoleBindingRestrictionSpec: %v", spec) +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go new file mode 100644 index 0000000000000..4580d3582f93e --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go @@ -0,0 +1,349 @@ +package restrictusers + +import ( + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + fakeuserclient "github.com/openshift/client-go/user/clientset/versioned/fake" +) + +func mustNewSubjectChecker(t *testing.T, spec *authorizationv1.RoleBindingRestrictionSpec) SubjectChecker { + checker, err := NewSubjectChecker(spec) + if err != nil { + t.Errorf("unexpected error from NewChecker: %v, spec: %#v", err, spec) + } + + return checker +} + +func TestSubjectCheckers(t *testing.T) { + var ( + userBobRef = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Bob", + } + userAliceRef = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Alice", + } + groupRef = rbac.Subject{ + Kind: rbac.GroupKind, + Name: "group", + } + serviceaccountRef = rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "namespace", + Name: "serviceaccount", + } + group = userv1.Group{ + ObjectMeta: metav1.ObjectMeta{ + Name: "group", + Labels: map[string]string{"baz": "quux"}, + }, + Users: []string{userBobRef.Name}, + } + userObjects = []runtime.Object{ + &userv1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Alice", + Labels: map[string]string{"foo": "bar"}, + }, + }, + &userv1.User{ + ObjectMeta: metav1.ObjectMeta{Name: "Bob"}, + Groups: []string{"group"}, + }, + &group, + } + kubeObjects = []runtime.Object{ + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "serviceaccount", + Labels: map[string]string{"xyzzy": "thud"}, + }, + }, + } + ) + + testCases := []struct { + name string + checker SubjectChecker + subject rbac.Subject + shouldAllow bool + }{ + { + name: "allow regular user by literal name match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userAliceRef.Name}, + }, + }), + subject: userAliceRef, + shouldAllow: true, + }, + { + name: "allow regular user by group membership", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Groups: []string{groupRef.Name}, + }, + }), + subject: userBobRef, + shouldAllow: true, + }, + { + name: "prohibit regular user when another user matches on group membership", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Groups: 
[]string{groupRef.Name}, + }, + }), + subject: userAliceRef, + shouldAllow: false, + }, + { + name: "allow regular user by label selector match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }), + subject: userAliceRef, + shouldAllow: true, + }, + { + name: "prohibit regular user when another user matches on label selector", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }), + subject: userBobRef, + shouldAllow: false, + }, + { + name: "allow regular group by literal name match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Groups: []string{groupRef.Name}, + }, + }), + subject: groupRef, + shouldAllow: true, + }, + { + name: "allow regular group by label selector match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"baz": "quux"}}, + }, + }, + }), + subject: groupRef, + shouldAllow: true, + }, + { + name: "allow service account with explicit namespace by match on literal name and explicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccountRef.Name, + Namespace: serviceaccountRef.Namespace, + }, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + { + name: "allow service account with explicit namespace by match on literal name and implicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + { + name: "prohibit service account with explicit namespace where literal name matches but explicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: serviceaccountRef.Namespace, + Name: serviceaccountRef.Name, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "prohibit service account with explicit namespace where literal name matches but implicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "allow service account with implicit namespace 
by match on literal name and explicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccountRef.Name, + Namespace: serviceaccountRef.Namespace, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: true, + }, + { + name: "allow service account with implicit namespace by match on literal name and implicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: true, + }, + { + name: "prohibit service account with implicit namespace where literal name matches but explicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "prohibit service account with explicit namespace where explicit namespace matches but literal name does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: serviceaccountRef.Namespace, + Name: "othername", + }, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: false, + }, + { + name: "allow service account by match on namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + Namespaces: []string{serviceaccountRef.Namespace}, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + } + + stopCh := make(chan struct{}) + defer close(stopCh) + + kclient := fake.NewSimpleClientset(kubeObjects...) + fakeUserClient := fakeuserclient.NewSimpleClientset(userObjects...) + groupCache := fakeGroupCache{groups: []userv1.Group{group}} + // This is a terrible, horrible, no-good, very bad hack to avoid a race + // condition between the test "allow regular user by group membership" + // and the group cache's initialisation. 
+ for { + if groups, _ := groupCache.GroupsFor(group.Users[0]); len(groups) == 1 { + break + } + time.Sleep(10 * time.Millisecond) + } + + ctx, err := newRoleBindingRestrictionContext("namespace", + kclient, fakeUserClient.UserV1(), groupCache) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + for _, tc := range testCases { + allowed, err := tc.checker.Allowed(tc.subject, ctx) + if err != nil { + t.Errorf("test case %v: unexpected error: %v", tc.name, err) + } + if allowed && !tc.shouldAllow { + t.Errorf("test case %v: subject allowed but should be prohibited", tc.name) + } + if !allowed && tc.shouldAllow { + t.Errorf("test case %v: subject prohibited but should be allowed", tc.name) + } + } +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go b/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go new file mode 100644 index 0000000000000..99a8156be3053 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go @@ -0,0 +1,55 @@ +package usercache + +import ( + "fmt" + + "k8s.io/client-go/tools/cache" + + userapi "github.com/openshift/api/user/v1" + userinformer "github.com/openshift/client-go/user/informers/externalversions/user/v1" +) + +// GroupCache is a skin on an indexer to provide the reverse index from user to groups. +// Once we work out a cleaner way to extend a lister, this should live there. +type GroupCache struct { + indexer cache.Indexer + groupsSynced cache.InformerSynced +} + +const ByUserIndexName = "ByUser" + +// ByUserIndexKeys is cache.IndexFunc for Groups that will index groups by User, so that a direct cache lookup +// using a User.Name will return all Groups that User is a member of +func ByUserIndexKeys(obj interface{}) ([]string, error) { + group, ok := obj.(*userapi.Group) + if !ok { + return nil, fmt.Errorf("unexpected type: %v", obj) + } + + return group.Users, nil +} + +func NewGroupCache(groupInformer userinformer.GroupInformer) *GroupCache { + return &GroupCache{ + indexer: groupInformer.Informer().GetIndexer(), + groupsSynced: groupInformer.Informer().HasSynced, + } +} + +func (c *GroupCache) GroupsFor(username string) ([]*userapi.Group, error) { + objs, err := c.indexer.ByIndex(ByUserIndexName, username) + if err != nil { + return nil, err + } + + groups := make([]*userapi.Group, len(objs)) + for i := range objs { + groups[i] = objs[i].(*userapi.Group) + } + + return groups, nil +} + +func (c *GroupCache) HasSynced() bool { + return c.groupsSynced() +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go new file mode 100644 index 0000000000000..7f2a6f888d472 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package api is the internal version of the API. 
+package clusterresourceoverride diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go new file mode 100644 index 0000000000000..f136def581ed5 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go @@ -0,0 +1,4 @@ +package clusterresourceoverride + +const PluginName = "autoscaling.openshift.io/ClusterResourceOverride" +const ConfigKind = "ClusterResourceOverrideConfig" diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go new file mode 100644 index 0000000000000..5308853cfd134 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go @@ -0,0 +1,23 @@ +package clusterresourceoverride + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: runtime.APIVersionInternal} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceOverrideConfig{}, + ) + return nil +} + +func (obj *ClusterResourceOverrideConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go new file mode 100644 index 0000000000000..3718e265caafa --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go @@ -0,0 +1,24 @@ +package clusterresourceoverride + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride +// admission controller which overrides user-provided container request/limit values. +type ClusterResourceOverrideConfig struct { + metav1.TypeMeta + // For each of the following, if a non-zero ratio is specified then the initial + // value (if any) in the pod spec is overwritten according to the ratio. + // LimitRange defaults are merged prior to the override. + // + // LimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; + // 100% overrides CPU to 1 core per 1GiB of RAM. This is done before overriding the CPU request. 
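+	// For example, a value of 200 turns a 512MiB memory limit into a 1-core CPU limit (2 x 0.5GiB x 1 core per GiB).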
+ LimitCPUToMemoryPercent int64 + // CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit + CPURequestToLimitPercent int64 + // MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit + MemoryRequestToLimitPercent int64 +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go new file mode 100644 index 0000000000000..7397986b23605 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go new file mode 100644 index 0000000000000..91d44566e3476 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go @@ -0,0 +1,27 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" +) + +func (obj *ClusterResourceOverrideConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + clusterresourceoverride.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceOverrideConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go new file mode 100644 index 0000000000000..f909b0db2ee4f --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go @@ -0,0 +1,17 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go' This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_ClusterResourceOverrideConfig = map[string]string{ + "": "ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride admission controller which overrides user-provided container request/limit values.", + "limitCPUToMemoryPercent": "For each of the following, if a non-zero ratio is specified then the initial value (if any) in the pod spec is overwritten according to the ratio. LimitRange defaults are merged prior to the override.\n\nLimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; 100% overrides CPU to 1 core per 1GiB of RAM. 
This is done before overriding the CPU request.", + "cpuRequestToLimitPercent": "CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit", + "memoryRequestToLimitPercent": "MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit", +} + +func (ClusterResourceOverrideConfig) SwaggerDoc() map[string]string { + return map_ClusterResourceOverrideConfig +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go new file mode 100644 index 0000000000000..9a56034174e15 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go @@ -0,0 +1,24 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride +// admission controller which overrides user-provided container request/limit values. +type ClusterResourceOverrideConfig struct { + metav1.TypeMeta `json:",inline"` + // For each of the following, if a non-zero ratio is specified then the initial + // value (if any) in the pod spec is overwritten according to the ratio. + // LimitRange defaults are merged prior to the override. + // + // LimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; + // 100% overrides CPU to 1 core per 1GiB of RAM. This is done before overriding the CPU request. + LimitCPUToMemoryPercent int64 `json:"limitCPUToMemoryPercent"` + // CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit + CPURequestToLimitPercent int64 `json:"cpuRequestToLimitPercent"` + // MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit + MemoryRequestToLimitPercent int64 `json:"memoryRequestToLimitPercent"` +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go new file mode 100644 index 0000000000000..14cdcdd586abf --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go @@ -0,0 +1,27 @@ +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" +) + +func Validate(config *clusterresourceoverride.ClusterResourceOverrideConfig) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil { + return allErrs + } + if config.LimitCPUToMemoryPercent == 0 && config.CPURequestToLimitPercent == 0 && config.MemoryRequestToLimitPercent == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath(clusterresourceoverride.PluginName), "plugin enabled but no percentages were specified")) + } + if config.LimitCPUToMemoryPercent < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "LimitCPUToMemoryPercent"), config.LimitCPUToMemoryPercent, "must be positive")) + } + if config.CPURequestToLimitPercent < 0 || config.CPURequestToLimitPercent > 100 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "CPURequestToLimitPercent"), config.CPURequestToLimitPercent, "must be between 0 and 100")) + } + if 
config.MemoryRequestToLimitPercent < 0 || config.MemoryRequestToLimitPercent > 100 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "MemoryRequestToLimitPercent"), config.MemoryRequestToLimitPercent, "must be between 0 and 100")) + } + return allErrs +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go new file mode 100644 index 0000000000000..2eb498613c0ad --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package api is the internal version of the API. +package runonceduration diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go new file mode 100644 index 0000000000000..379c2be1ed1a5 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go @@ -0,0 +1,34 @@ +package runonceduration + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RunOnceDurationConfig{}, + ) + return nil +} + +func (obj *RunOnceDurationConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go new file mode 100644 index 0000000000000..1a9f5a112c90a --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go @@ -0,0 +1,26 @@ +package runonceduration + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RunOnceDurationConfig is the configuration for the RunOnceDuration plugin. +// It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod. +// The project that contains the pod may specify a different setting. That setting will +// take precedence over the one configured for the plugin here. +type RunOnceDurationConfig struct { + metav1.TypeMeta + + // ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods + // Only a positive value is valid. Absence of a value means that the plugin + // won't make any changes to the pod + ActiveDeadlineSecondsOverride *int64 +} + +// ActiveDeadlineSecondsLimitAnnotation can be set on a project to limit the number of +// seconds that a run-once pod can be active in that project +// TODO: this label needs to change to reflect its function. It's a limit, not an override. 
+// It is kept this way for compatibility. Only change it in a new version of the API. +const ActiveDeadlineSecondsLimitAnnotation = "openshift.io/active-deadline-seconds-override" diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go new file mode 100644 index 0000000000000..31253537849a6 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go @@ -0,0 +1,26 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + + internal "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + err := scheme.AddConversionFunc((*RunOnceDurationConfig)(nil), (*internal.RunOnceDurationConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + in := a.(*RunOnceDurationConfig) + out := b.(*internal.RunOnceDurationConfig) + out.ActiveDeadlineSecondsOverride = in.ActiveDeadlineSecondsOverride + return nil + }) + if err != nil { + return err + } + return scheme.AddConversionFunc((*internal.RunOnceDurationConfig)(nil), (*RunOnceDurationConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + in := a.(*internal.RunOnceDurationConfig) + out := b.(*RunOnceDurationConfig) + out.ActiveDeadlineSecondsOverride = in.ActiveDeadlineSecondsOverride + return nil + }) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go new file mode 100644 index 0000000000000..f70b886a67a72 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration + +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go new file mode 100644 index 0000000000000..b456123c9fab2 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go @@ -0,0 +1,29 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func (obj *RunOnceDurationConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + runonceduration.Install, + + addConversionFuncs, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RunOnceDurationConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go new file mode 100644 index 0000000000000..1cb7c3cdb319f --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go @@ -0,0 +1,15 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go' This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_RunOnceDurationConfig = map[string]string{ + "": "RunOnceDurationConfig is the configuration for the RunOnceDuration plugin. It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod. The project that contains the pod may specify a different setting. That setting will take precedence over the one configured for the plugin here.", + "activeDeadlineSecondsOverride": "ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods Only a positive value is valid. Absence of a value means that the plugin won't make any changes to the pod It is kept this way for compatibility. Only change it in a new version of the API.", +} + +func (RunOnceDurationConfig) SwaggerDoc() map[string]string { + return map_RunOnceDurationConfig +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go new file mode 100644 index 0000000000000..4cfa3823ba10b --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go @@ -0,0 +1,22 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RunOnceDurationConfig is the configuration for the RunOnceDuration plugin. +// It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod. +// The project that contains the pod may specify a different setting. That setting will +// take precedence over the one configured for the plugin here. 
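+// The per-project limit is read from the openshift.io/active-deadline-seconds-override annotation on the project.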
+type RunOnceDurationConfig struct { + metav1.TypeMeta `json:",inline"` + + // ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods + // Only a positive value is valid. Absence of a value means that the plugin + // won't make any changes to the pod + // TODO: change the external name of this field to reflect that it is a limit, not an override + // It is kept this way for compatibility. Only change it in a new version of the API. + ActiveDeadlineSecondsOverride *int64 `json:"activeDeadlineSecondsOverride,omitempty" description:"maximum value for activeDeadlineSeconds in run-once pods"` +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go new file mode 100644 index 0000000000000..7ddcad869845a --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go @@ -0,0 +1,18 @@ +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +// ValidateRunOnceDurationConfig validates the RunOnceDuration plugin configuration +func ValidateRunOnceDurationConfig(config *runonceduration.RunOnceDurationConfig) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil || config.ActiveDeadlineSecondsOverride == nil { + return allErrs + } + if *config.ActiveDeadlineSecondsOverride <= 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("activeDeadlineSecondsOverride"), config.ActiveDeadlineSecondsOverride, "must be greater than 0")) + } + return allErrs +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go new file mode 100644 index 0000000000000..19f6f6d70544b --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go @@ -0,0 +1,29 @@ +package validation + +import ( + "testing" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func TestRunOnceDurationConfigValidation(t *testing.T) { + // Check invalid duration returns an error + var invalidSecs int64 = -1 + invalidConfig := &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: &invalidSecs, + } + errs := ValidateRunOnceDurationConfig(invalidConfig) + if len(errs) == 0 { + t.Errorf("Did not get expected error on invalid config") + } + + // Check that valid duration returns no error + var validSecs int64 = 5 + validConfig := &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: &validSecs, + } + errs = ValidateRunOnceDurationConfig(validConfig) + if len(errs) > 0 { + t.Errorf("Unexpected error on valid config") + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go new file mode 100644 index 0000000000000..6aed487fdef13 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go @@ -0,0 +1,348 @@ +package clusterresourceoverride + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/openshift/library-go/pkg/config/helpers" + v1 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1" + + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/plugin/pkg/admission/limitranger" + + api "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation" +) + +const ( + clusterResourceOverrideAnnotation = "autoscaling.openshift.io/cluster-resource-override-enabled" + cpuBaseScaleFactor = 1000.0 / (1024.0 * 1024.0 * 1024.0) // 1000 milliCores per 1GiB +) + +var ( + cpuFloor = resource.MustParse("1m") + memFloor = resource.MustParse("1Mi") +) + +func Register(plugins *admission.Plugins) { + plugins.Register(api.PluginName, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := ReadConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", api.PluginName) + return nil, nil + } + return newClusterResourceOverride(pluginConfig) + }) +} + +type internalConfig struct { + limitCPUToMemoryRatio float64 + cpuRequestToLimitRatio float64 + memoryRequestToLimitRatio float64 +} +type clusterResourceOverridePlugin struct { + *admission.Handler + config *internalConfig + nsLister corev1listers.NamespaceLister + LimitRanger *limitranger.LimitRanger + limitRangesLister corev1listers.LimitRangeLister +} + +var _ = initializer.WantsExternalKubeInformerFactory(&clusterResourceOverridePlugin{}) +var _ = initializer.WantsExternalKubeClientSet(&clusterResourceOverridePlugin{}) +var _ = admission.MutationInterface(&clusterResourceOverridePlugin{}) +var _ = admission.ValidationInterface(&clusterResourceOverridePlugin{}) + +// newClusterResourceOverride returns an admission controller for containers that +// configurably overrides container resource request/limits +func newClusterResourceOverride(config *api.ClusterResourceOverrideConfig) (admission.Interface, error) { + klog.V(2).Infof("%s admission controller loaded with config: %v", api.PluginName, config) + var internal *internalConfig + if config != nil { + internal = &internalConfig{ + limitCPUToMemoryRatio: float64(config.LimitCPUToMemoryPercent) / 100, + cpuRequestToLimitRatio: float64(config.CPURequestToLimitPercent) / 100, + memoryRequestToLimitRatio: float64(config.MemoryRequestToLimitPercent) / 100, + } + } + + limitRanger, err := limitranger.NewLimitRanger(nil) + if err != nil { + return nil, err + } + + return &clusterResourceOverridePlugin{ + Handler: admission.NewHandler(admission.Create), + config: internal, + LimitRanger: limitRanger, + }, nil +} + +func (d *clusterResourceOverridePlugin) SetExternalKubeClientSet(c kubernetes.Interface) { + d.LimitRanger.SetExternalKubeClientSet(c) +} + +func (d *clusterResourceOverridePlugin) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + d.LimitRanger.SetExternalKubeInformerFactory(kubeInformers) + d.limitRangesLister = kubeInformers.Core().V1().LimitRanges().Lister() + d.nsLister = kubeInformers.Core().V1().Namespaces().Lister() +} + +func ReadConfig(configFile io.Reader) 
(*api.ClusterResourceOverrideConfig, error) { + obj, err := helpers.ReadYAMLToInternal(configFile, api.Install, v1.Install) + if err != nil { + klog.V(5).Infof("%s error reading config: %v", api.PluginName, err) + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*api.ClusterResourceOverrideConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + klog.V(5).Infof("%s config is: %v", api.PluginName, config) + if errs := validation.Validate(config); len(errs) > 0 { + return nil, errs.ToAggregate() + } + + return config, nil +} + +func (a *clusterResourceOverridePlugin) ValidateInitialization() error { + if a.nsLister == nil { + return fmt.Errorf("%s did not get a namespace lister", api.PluginName) + } + return a.LimitRanger.ValidateInitialization() +} + +// this a real shame to be special cased. +var ( + forbiddenNames = []string{"openshift", "kubernetes", "kube"} + forbiddenPrefixes = []string{"openshift-", "kubernetes-", "kube-"} +) + +func isExemptedNamespace(name string) bool { + for _, s := range forbiddenNames { + if name == s { + return true + } + } + for _, s := range forbiddenPrefixes { + if strings.HasPrefix(name, s) { + return true + } + } + return false +} + +func (a *clusterResourceOverridePlugin) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.admit(ctx, attr, true, o) +} + +func (a *clusterResourceOverridePlugin) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.admit(ctx, attr, false, o) +} + +// TODO this will need to update when we have pod requests/limits +func (a *clusterResourceOverridePlugin) admit(ctx context.Context, attr admission.Attributes, mutationAllowed bool, o admission.ObjectInterfaces) error { + klog.V(6).Infof("%s admission controller is invoked", api.PluginName) + if a.config == nil || attr.GetResource().GroupResource() != coreapi.Resource("pods") || attr.GetSubresource() != "" { + return nil // not applicable + } + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject())) + } + klog.V(5).Infof("%s is looking at creating pod %s in project %s", api.PluginName, pod.Name, attr.GetNamespace()) + + // allow annotations on project to override + ns, err := a.nsLister.Get(attr.GetNamespace()) + if err != nil { + klog.Warningf("%s got an error retrieving namespace: %v", api.PluginName, err) + return admission.NewForbidden(attr, err) // this should not happen though + } + + projectEnabledPlugin, exists := ns.Annotations[clusterResourceOverrideAnnotation] + if exists && projectEnabledPlugin != "true" { + klog.V(5).Infof("%s is disabled for project %s", api.PluginName, attr.GetNamespace()) + return nil // disabled for this project, do nothing + } + + if isExemptedNamespace(ns.Name) { + klog.V(5).Infof("%s is skipping exempted project %s", api.PluginName, attr.GetNamespace()) + return nil // project is exempted, do nothing + } + + namespaceLimits := []*corev1.LimitRange{} + + if a.limitRangesLister != nil { + limits, err := a.limitRangesLister.LimitRanges(attr.GetNamespace()).List(labels.Everything()) + if err != nil { + return err + } + namespaceLimits = limits + } + + // Don't mutate resource requirements below the namespace + // limit minimums. 
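+	// minResourceLimits (defined later in this file) returns the smallest container-type Min value
+	// declared across the namespace's LimitRanges, or nil when none is set; the overrides computed
+	// below are clamped to these floors in addition to the global cpuFloor/memFloor.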
+ nsCPUFloor := minResourceLimits(namespaceLimits, corev1.ResourceCPU) + nsMemFloor := minResourceLimits(namespaceLimits, corev1.ResourceMemory) + + // Reuse LimitRanger logic to apply limit/req defaults from the project. Ignore validation + // errors, assume that LimitRanger will run after this plugin to validate. + klog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec) + if err := a.LimitRanger.Admit(ctx, attr, o); err != nil { + klog.V(5).Infof("%s: error from LimitRanger: %#v", api.PluginName, err) + } + klog.V(5).Infof("%s: pod limits after LimitRanger: %#v", api.PluginName, pod.Spec) + for i := range pod.Spec.InitContainers { + if err := updateContainerResources(a.config, &pod.Spec.InitContainers[i], nsCPUFloor, nsMemFloor, mutationAllowed); err != nil { + return admission.NewForbidden(attr, fmt.Errorf("spec.initContainers[%d].%v", i, err)) + } + } + for i := range pod.Spec.Containers { + if err := updateContainerResources(a.config, &pod.Spec.Containers[i], nsCPUFloor, nsMemFloor, mutationAllowed); err != nil { + return admission.NewForbidden(attr, fmt.Errorf("spec.containers[%d].%v", i, err)) + } + } + klog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec) + return nil +} + +func updateContainerResources(config *internalConfig, container *coreapi.Container, nsCPUFloor, nsMemFloor *resource.Quantity, mutationAllowed bool) error { + resources := container.Resources + memLimit, memFound := resources.Limits[coreapi.ResourceMemory] + if memFound && config.memoryRequestToLimitRatio != 0 { + // memory is measured in whole bytes. + // the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users. + amount := memLimit.Value() * int64(config.memoryRequestToLimitRatio*100) / 100 + // TODO: move into resource.Quantity + var mod int64 + switch memLimit.Format { + case resource.BinarySI: + mod = 1024 * 1024 + default: + mod = 1000 * 1000 + } + if rem := amount % mod; rem != 0 { + amount = amount - rem + } + q := resource.NewQuantity(int64(amount), memLimit.Format) + if memFloor.Cmp(*q) > 0 { + clone := memFloor.DeepCopy() + q = &clone + } + if nsMemFloor != nil && q.Cmp(*nsMemFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceMemory, q.String(), nsMemFloor.String()) + clone := nsMemFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Requests, corev1.ResourceMemory, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.requests.%s %v", corev1.ResourceMemory, err) + } + } + if memFound && config.limitCPUToMemoryRatio != 0 { + amount := float64(memLimit.Value()) * config.limitCPUToMemoryRatio * cpuBaseScaleFactor + q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI) + if cpuFloor.Cmp(*q) > 0 { + clone := cpuFloor.DeepCopy() + q = &clone + } + if nsCPUFloor != nil && q.Cmp(*nsCPUFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceCPU, q.String(), nsCPUFloor.String()) + clone := nsCPUFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Limits, corev1.ResourceCPU, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.limits.%s %v", corev1.ResourceCPU, err) + } + } + + cpuLimit, cpuFound := resources.Limits[coreapi.ResourceCPU] + if cpuFound && config.cpuRequestToLimitRatio != 0 { + amount := float64(cpuLimit.MilliValue()) * config.cpuRequestToLimitRatio + q := 
resource.NewMilliQuantity(int64(amount), cpuLimit.Format) + if cpuFloor.Cmp(*q) > 0 { + clone := cpuFloor.DeepCopy() + q = &clone + } + if nsCPUFloor != nil && q.Cmp(*nsCPUFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceCPU, q.String(), nsCPUFloor.String()) + clone := nsCPUFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Requests, corev1.ResourceCPU, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.requests.%s %v", corev1.ResourceCPU, err) + } + } + + return nil +} + +func applyQuantity(l coreapi.ResourceList, r corev1.ResourceName, v resource.Quantity, mutationAllowed bool) error { + if mutationAllowed { + l[coreapi.ResourceName(r)] = v + return nil + } + + if oldValue, ok := l[coreapi.ResourceName(r)]; !ok { + return fmt.Errorf("mutated, expected: %v, now absent", v) + } else if oldValue.Cmp(v) != 0 { + return fmt.Errorf("mutated, expected: %v, got %v", v, oldValue) + } + + return nil +} + +// minResourceLimits finds the Min limit for resourceName. Nil is +// returned if limitRanges is empty or limits contains no resourceName +// limits. +func minResourceLimits(limitRanges []*corev1.LimitRange, resourceName corev1.ResourceName) *resource.Quantity { + limits := []*resource.Quantity{} + + for _, limitRange := range limitRanges { + for _, limit := range limitRange.Spec.Limits { + if limit.Type == corev1.LimitTypeContainer { + if limit, found := limit.Min[resourceName]; found { + clone := limit.DeepCopy() + limits = append(limits, &clone) + } + } + } + } + + if len(limits) == 0 { + return nil + } + + return minQuantity(limits) +} + +func minQuantity(quantities []*resource.Quantity) *resource.Quantity { + min := quantities[0].DeepCopy() + + for i := range quantities { + if quantities[i].Cmp(min) < 0 { + min = quantities[i].DeepCopy() + } + } + + return &min +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go new file mode 100644 index 0000000000000..d1c54bb140aae --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go @@ -0,0 +1,507 @@ +package clusterresourceoverride + +import ( + "bytes" + "context" + "fmt" + "io" + "reflect" + "testing" + + "github.com/openshift/library-go/pkg/config/helpers" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" + clusterresourceoverridev1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + yamlConfig = ` +apiVersion: autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +limitCPUToMemoryPercent: 100 +cpuRequestToLimitPercent: 10 +memoryRequestToLimitPercent: 25 +` + invalidConfig = ` +apiVersion: autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +cpuRequestToLimitPercent: 200 +` + invalidConfig2 = ` +apiVersion: 
autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +` +) + +var ( + deserializedYamlConfig = &clusterresourceoverride.ClusterResourceOverrideConfig{ + LimitCPUToMemoryPercent: 100, + CPURequestToLimitPercent: 10, + MemoryRequestToLimitPercent: 25, + } +) + +func TestConfigReader(t *testing.T) { + initialConfig := testConfig(10, 20, 30) + serializedConfig, serializationErr := helpers.WriteYAML(initialConfig, clusterresourceoverridev1.Install) + if serializationErr != nil { + t.Fatalf("WriteYAML: config serialize failed: %v", serializationErr) + } + + tests := []struct { + name string + config io.Reader + expectErr bool + expectNil bool + expectInvalid bool + expectedConfig *clusterresourceoverride.ClusterResourceOverrideConfig + }{ + { + name: "process nil config", + config: nil, + expectNil: true, + }, { + name: "deserialize initialConfig yaml", + config: bytes.NewReader(serializedConfig), + expectedConfig: initialConfig, + }, { + name: "completely broken config", + config: bytes.NewReader([]byte("asdfasdfasdF")), + expectErr: true, + }, { + name: "deserialize yamlConfig", + config: bytes.NewReader([]byte(yamlConfig)), + expectedConfig: deserializedYamlConfig, + }, { + name: "choke on out-of-bounds ratio", + config: bytes.NewReader([]byte(invalidConfig)), + expectInvalid: true, + expectErr: true, + }, { + name: "complain about no settings", + config: bytes.NewReader([]byte(invalidConfig2)), + expectInvalid: true, + expectErr: true, + }, + } + for _, test := range tests { + config, err := ReadConfig(test.config) + if test.expectErr && err == nil { + t.Errorf("%s: expected error", test.name) + } else if !test.expectErr && err != nil { + t.Errorf("%s: expected no error, saw %v", test.name, err) + } + if err == nil { + if test.expectNil && config != nil { + t.Errorf("%s: expected nil config, but saw: %v", test.name, config) + } else if !test.expectNil && config == nil { + t.Errorf("%s: expected config, but got nil", test.name) + } + } + if config != nil { + if test.expectedConfig != nil && *test.expectedConfig != *config { + t.Errorf("%s: expected %v from reader, but got %v", test.name, test.expectErr, config) + } + if err := validation.Validate(config); test.expectInvalid && len(err) == 0 { + t.Errorf("%s: expected validation to fail, but it passed", test.name) + } else if !test.expectInvalid && len(err) > 0 { + t.Errorf("%s: expected validation to pass, but it failed with %v", test.name, err) + } + } + } +} + +func TestLimitRequestAdmission(t *testing.T) { + tests := []struct { + name string + config *clusterresourceoverride.ClusterResourceOverrideConfig + pod *kapi.Pod + expectedMemRequest resource.Quantity + expectedCpuLimit resource.Quantity + expectedCpuRequest resource.Quantity + namespace *corev1.Namespace + namespaceLimits []*corev1.LimitRange + }{ + { + name: "ignore pods that have no memory limit specified", + config: testConfig(100, 50, 50), + pod: testBestEffortPod(), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("0"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, ignore pods that have no memory limit specified", + config: testConfig(100, 50, 50), + pod: testBestEffortPod(), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("0"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + 
fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "test floor for memory and cpu", + config: testConfig(100, 50, 50), + pod: testPod("1Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("1Mi"), + expectedCpuLimit: resource.MustParse("1m"), + expectedCpuRequest: resource.MustParse("1m"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, test floor for memory and cpu", + config: testConfig(100, 50, 50), + pod: testPod("1Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("567m"), + expectedCpuRequest: resource.MustParse("567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "nil config", + config: nil, + pod: testPod("1", "1", "1", "1"), + expectedMemRequest: resource.MustParse("1"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("1"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, nil config", + config: nil, + pod: testPod("1", "1", "1", "1"), + expectedMemRequest: resource.MustParse("1"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("1"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "all values are adjusted", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("512Mi"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("500m"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, all values are adjusted to floor of namespace limits", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("10567m"), + expectedCpuRequest: resource.MustParse("10567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "just requests are adjusted", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("5Mi"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("25m"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, all requests are adjusted to floor of namespace limits", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("10567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "project annotation disables overrides", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: 
resource.MustParse("0"), + namespace: fakeNamespace(false), + }, + { + name: "with namespace limits, project annotation disables overrides", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(false), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "large values don't overflow", + config: testConfig(100, 50, 50), + pod: testPod("1Ti", "0", "0", "0"), + expectedMemRequest: resource.MustParse("512Gi"), + expectedCpuLimit: resource.MustParse("1024"), + expectedCpuRequest: resource.MustParse("512"), + namespace: fakeNamespace(true), + }, + { + name: "little values mess things up", + config: testConfig(500, 10, 10), + pod: testPod("1.024Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("1Mi"), + expectedCpuLimit: resource.MustParse("5m"), + expectedCpuRequest: resource.MustParse("1m"), + namespace: fakeNamespace(true), + }, + { + name: "test fractional memory requests round up", + config: testConfig(500, 10, 60), + pod: testPod("512Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("307Mi"), + expectedCpuLimit: resource.MustParse("2.5"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: fakeNamespace(true), + }, + { + name: "test only containers types are considered with namespace limits", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("512Mi"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("500m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinStorageLimitRange("1567Mi"), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c, err := newClusterResourceOverride(test.config) + if err != nil { + t.Fatalf("%s: config de/serialize failed: %v", test.name, err) + } + // Override LimitRanger with limits from test case + c.(*clusterResourceOverridePlugin).limitRangesLister = fakeLimitRangeLister{ + namespaceLister: fakeLimitRangeNamespaceLister{ + limits: test.namespaceLimits, + }, + } + c.(*clusterResourceOverridePlugin).nsLister = fakeNamespaceLister(test.namespace) + attrs := admission.NewAttributesRecord(test.pod, nil, schema.GroupVersionKind{}, test.namespace.Name, "name", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + clone := test.pod.DeepCopy() + if err = c.(admission.MutationInterface).Admit(context.TODO(), attrs, nil); err != nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + if err = c.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil); err != nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !reflect.DeepEqual(test.pod, clone) { + attrs := admission.NewAttributesRecord(clone, nil, schema.GroupVersionKind{}, test.namespace.Name, "name", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + if err = c.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil); err == nil { + t.Fatalf("%s: admission controller returned no error, but should", test.name) + } + } + + resources := test.pod.Spec.InitContainers[0].Resources // only test one container + if 
actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 { + t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest) + } + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Errorf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 { + t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit) + } + + resources = test.pod.Spec.Containers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 { + t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest) + } + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Errorf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 { + t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit) + } + }) + } +} + +func testBestEffortPod() *kapi.Pod { + return &kapi.Pod{ + Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{}, + }, + }, + Containers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{}, + }, + }, + }, + } +} + +func testPod(memLimit string, memRequest string, cpuLimit string, cpuRequest string) *kapi.Pod { + return &kapi.Pod{ + Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{ + Limits: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuLimit), + kapi.ResourceMemory: resource.MustParse(memLimit), + }, + Requests: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuRequest), + kapi.ResourceMemory: resource.MustParse(memRequest), + }, + }, + }, + }, + Containers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{ + Limits: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuLimit), + kapi.ResourceMemory: resource.MustParse(memLimit), + }, + Requests: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuRequest), + kapi.ResourceMemory: resource.MustParse(memRequest), + }, + }, + }, + }, + }, + } +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +var nsIndex = 0 + +func fakeNamespace(pluginEnabled bool) *corev1.Namespace { + nsIndex++ + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("fakeNS%d", nsIndex), + Annotations: map[string]string{}, + }, + } + if !pluginEnabled { + ns.Annotations[clusterResourceOverrideAnnotation] = "false" + } + return ns +} + +func fakeNamespaceLister(ns *corev1.Namespace) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func testConfig(lc2mr int64, cr2lr int64, mr2lr int64) *clusterresourceoverride.ClusterResourceOverrideConfig { + return &clusterresourceoverride.ClusterResourceOverrideConfig{ + LimitCPUToMemoryPercent: lc2mr, + CPURequestToLimitPercent: cr2lr, + MemoryRequestToLimitPercent: mr2lr, + } +} + +func fakeMinLimitRange(limitType corev1.LimitType, resourceType corev1.ResourceName, limits ...string) 
*corev1.LimitRange { + r := &corev1.LimitRange{} + + for i := range limits { + rl := corev1.ResourceList{} + rl[resourceType] = resource.MustParse(limits[i]) + r.Spec.Limits = append(r.Spec.Limits, + corev1.LimitRangeItem{ + Type: limitType, + Min: rl, + }, + ) + } + + return r +} + +func fakeMinMemoryLimitRange(limits ...string) *corev1.LimitRange { + return fakeMinLimitRange(corev1.LimitTypeContainer, corev1.ResourceMemory, limits...) +} + +func fakeMinCPULimitRange(limits ...string) *corev1.LimitRange { + return fakeMinLimitRange(corev1.LimitTypeContainer, corev1.ResourceCPU, limits...) +} + +func fakeMinStorageLimitRange(limits ...string) *corev1.LimitRange { + return fakeMinLimitRange(corev1.LimitTypePersistentVolumeClaim, corev1.ResourceStorage, limits...) +} + +type fakeLimitRangeLister struct { + corev1listers.LimitRangeLister + namespaceLister fakeLimitRangeNamespaceLister +} + +type fakeLimitRangeNamespaceLister struct { + corev1listers.LimitRangeNamespaceLister + limits []*corev1.LimitRange +} + +func (f fakeLimitRangeLister) LimitRanges(namespace string) corev1listers.LimitRangeNamespaceLister { + return f.namespaceLister +} + +func (f fakeLimitRangeNamespaceLister) List(selector labels.Selector) ([]*corev1.LimitRange, error) { + return f.limits, nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go new file mode 100644 index 0000000000000..aaf2176af054a --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go @@ -0,0 +1,8 @@ +package clusterresourceoverride + +// The ClusterResourceOverride plugin is only active when admission control config is supplied for it. +// The plugin allows administrators to override user-provided container request/limit values +// in order to control overcommit and optionally pin CPU to memory. +// The plugin's actions can be disabled per-project with the project annotation +// autoscaling.openshift.io/cluster-resource-override-enabled="false", so cluster admins +// can exempt infrastructure projects and such from the overrides. 
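+//
+// For illustration only, an admission configuration for this plugin (the percentages are example
+// values, matching the shape exercised by the tests in this package) would look roughly like:
+//
+//	apiVersion: autoscaling.openshift.io/v1
+//	kind: ClusterResourceOverrideConfig
+//	limitCPUToMemoryPercent: 100
+//	cpuRequestToLimitPercent: 10
+//	memoryRequestToLimitPercent: 25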
diff --git a/openshift-kube-apiserver/admission/autoscaling/managednode/admission.go b/openshift-kube-apiserver/admission/autoscaling/managednode/admission.go new file mode 100644 index 0000000000000..d89c6423a05eb --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managednode/admission.go @@ -0,0 +1,136 @@ +package managednode + +import ( + "context" + "fmt" + "io" + "strings" + + configv1 "github.com/openshift/api/config/v1" + configv1informer "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/kubernetes/pkg/kubelet/managed" + + corev1 "k8s.io/api/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + + "k8s.io/client-go/kubernetes" +) + +const ( + PluginName = "autoscaling.openshift.io/ManagedNode" + // infraClusterName contains the name of the cluster infrastructure resource + infraClusterName = "cluster" +) + +var _ = initializer.WantsExternalKubeClientSet(&managedNodeValidate{}) +var _ = admission.ValidationInterface(&managedNodeValidate{}) +var _ = WantsInfraInformer(&managedNodeValidate{}) + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(_ io.Reader) (admission.Interface, error) { + return &managedNodeValidate{ + Handler: admission.NewHandler(admission.Create, admission.Update), + }, nil + }) +} + +type managedNodeValidate struct { + *admission.Handler + client kubernetes.Interface + infraConfigLister configv1listers.InfrastructureLister + infraConfigListSynced func() bool +} + +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. +func (a *managedNodeValidate) SetExternalKubeClientSet(client kubernetes.Interface) { + a.client = client +} + +func (a *managedNodeValidate) SetInfraInformer(informer configv1informer.InfrastructureInformer) { + a.infraConfigLister = informer.Lister() + a.infraConfigListSynced = informer.Informer().HasSynced +} + +func (a *managedNodeValidate) ValidateInitialization() error { + if a.client == nil { + return fmt.Errorf("%s plugin needs a kubernetes client", PluginName) + } + if a.infraConfigLister == nil { + return fmt.Errorf("%s did not get a config infrastructure lister", PluginName) + } + if a.infraConfigListSynced == nil { + return fmt.Errorf("%s plugin needs a config infrastructure lister synced", PluginName) + } + return nil +} + +func (a *managedNodeValidate) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) (err error) { + if attr.GetResource().GroupResource() != corev1.Resource("nodes") || attr.GetSubresource() != "" { + return nil + } + + node, ok := attr.GetObject().(*coreapi.Node) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetResource())) + } + + // infraConfigListSynced is expected to be thread-safe since the underlying call is to the standard + // informer HasSynced() function which is thread-safe. 
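+	// Fail closed: without a synced infrastructure cache we cannot tell whether CPU partitioning
+	// is enabled for the cluster, so reject the node and let it be re-admitted once the cache syncs.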
+ if !a.infraConfigListSynced() { + return admission.NewForbidden(attr, fmt.Errorf("%s infra config cache not synchronized", PluginName)) + } + + clusterInfra, err := a.infraConfigLister.Get(infraClusterName) + if err != nil { + return admission.NewForbidden(attr, err) // can happen due to informer latency + } + + // Check if we are in CPU Partitioning mode for AllNodes + allErrs := validateClusterCPUPartitioning(clusterInfra.Status, node) + if len(allErrs) == 0 { + return nil + } + return errors.NewInvalid(attr.GetKind().GroupKind(), node.Name, allErrs) +} + +// validateClusterCPUPartitioning Make sure that we only check nodes when CPU Partitioning is turned on. +// We also need to account for Single Node upgrades, during that initial upgrade, NTO will update this field during +// upgrade to make it authoritative from that point on. A roll back will revert an SingleNode cluster back to it's normal cycle. +// Other installations will have this field set at install time, and can not be turned off. +// +// If CPUPartitioning == AllNodes and is not empty value, check nodes +func validateClusterCPUPartitioning(infraStatus configv1.InfrastructureStatus, node *coreapi.Node) field.ErrorList { + errorMessage := "node does not contain resource information, this is required for clusters with workload partitioning enabled" + var allErrs field.ErrorList + + if infraStatus.CPUPartitioning == configv1.CPUPartitioningAllNodes { + if !containsCPUResource(node.Status.Capacity) { + allErrs = append(allErrs, getNodeInvalidWorkloadResourceError("capacity", errorMessage)) + } + if !containsCPUResource(node.Status.Allocatable) { + allErrs = append(allErrs, getNodeInvalidWorkloadResourceError("allocatable", errorMessage)) + } + } + + return allErrs +} + +func containsCPUResource(resources coreapi.ResourceList) bool { + for k := range resources { + if strings.Contains(k.String(), managed.WorkloadsCapacitySuffix) { + return true + } + } + return false +} + +func getNodeInvalidWorkloadResourceError(resourcePool, message string) *field.Error { + return field.Required(field.NewPath("status", resourcePool, managed.WorkloadsCapacitySuffix), message) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managednode/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/managednode/admission_test.go new file mode 100644 index 0000000000000..8a1c0157a5d5d --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managednode/admission_test.go @@ -0,0 +1,128 @@ +package managednode + +import ( + "context" + "fmt" + "testing" + + configv1 "github.com/openshift/api/config/v1" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + corev1 "k8s.io/api/core/v1" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + managedCapacityLabel = "management.workload.openshift.io/cores" +) + +func TestAdmit(t *testing.T) { + tests := []struct { + name string + node *corev1.Node + infra *configv1.Infrastructure + expectedError error + }{ + { + name: "should succeed when CPU partitioning is set to AllNodes", + node: testNodeWithManagementResource(true), + infra: testClusterInfra(configv1.CPUPartitioningAllNodes), + }, + { + name: "should succeed when CPU partitioning is set to None", + node: 
testNodeWithManagementResource(true), + infra: testClusterInfra(configv1.CPUPartitioningNone), + }, + { + name: "should fail when nodes don't have capacity", + node: testNodeWithManagementResource(false), + infra: testClusterInfra(configv1.CPUPartitioningAllNodes), + expectedError: fmt.Errorf("node does not contain resource information, this is required for clusters with workload partitioning enabled"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + m, err := getMockNode(test.infra) + if err != nil { + t.Fatalf("%s: failed to get mock managementNode: %v", test.name, err) + } + + attrs := admission.NewAttributesRecord( + test.node, nil, schema.GroupVersionKind{}, + test.node.Namespace, test.node.Name, kapi.Resource("nodes").WithVersion("version"), "", + admission.Create, nil, false, fakeUser()) + err = m.Validate(context.TODO(), attrs, nil) + + if err == nil && test.expectedError != nil { + t.Fatalf("%s: the expected error %v, got nil", test.name, test.expectedError) + } + }) + } +} + +func testNodeWithManagementResource(capacity bool) *corev1.Node { + q := resource.NewQuantity(16000, resource.DecimalSI) + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "managed-node", + }, + } + if capacity { + node.Status.Capacity = corev1.ResourceList{ + managedCapacityLabel: *q, + } + } + return node +} + +func testClusterInfra(mode configv1.CPUPartitioningMode) *configv1.Infrastructure { + return &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: infraClusterName, + }, + Status: configv1.InfrastructureStatus{ + APIServerURL: "test", + ControlPlaneTopology: configv1.HighlyAvailableTopologyMode, + InfrastructureTopology: configv1.HighlyAvailableTopologyMode, + CPUPartitioning: mode, + }, + } +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +func getMockNode(infra *configv1.Infrastructure) (*managedNodeValidate, error) { + m := &managedNodeValidate{ + Handler: admission.NewHandler(admission.Create), + client: &fake.Clientset{}, + infraConfigLister: fakeInfraConfigLister(infra), + infraConfigListSynced: func() bool { return true }, + } + if err := m.ValidateInitialization(); err != nil { + return nil, err + } + + return m, nil +} + +func fakeInfraConfigLister(infra *configv1.Infrastructure) configv1listers.InfrastructureLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if infra != nil { + _ = indexer.Add(infra) + } + return configv1listers.NewInfrastructureLister(indexer) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managednode/initializers.go b/openshift-kube-apiserver/admission/autoscaling/managednode/initializers.go new file mode 100644 index 0000000000000..512a5f8d031c0 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managednode/initializers.go @@ -0,0 +1,28 @@ +package managednode + +import ( + "k8s.io/apiserver/pkg/admission" + + configv1informer "github.com/openshift/client-go/config/informers/externalversions/config/v1" +) + +func NewInitializer(infraInformer configv1informer.InfrastructureInformer) admission.PluginInitializer { + return &localInitializer{infraInformer: infraInformer} +} + +type WantsInfraInformer interface { + SetInfraInformer(informer configv1informer.InfrastructureInformer) + admission.InitializationValidator +} + +type localInitializer struct { + infraInformer configv1informer.InfrastructureInformer +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and 
provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsInfraInformer); ok { + wants.SetInfraInformer(i.infraInformer) + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go new file mode 100644 index 0000000000000..c672aeced20a2 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go @@ -0,0 +1,639 @@ +package managementcpusoverride + +import ( + "context" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "time" + + configv1 "github.com/openshift/api/config/v1" + configv1informer "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/warning" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/kubelet/cm" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" +) + +const ( + PluginName = "autoscaling.openshift.io/ManagementCPUsOverride" + // timeToWaitForCacheSync contains the time how long to wait for caches to be synchronize + timeToWaitForCacheSync = 10 * time.Second + // containerWorkloadResourceSuffix contains the suffix for the container workload resource + containerWorkloadResourceSuffix = "workload.openshift.io/cores" + // podWorkloadTargetAnnotationPrefix contains the prefix for the pod workload target annotation + podWorkloadTargetAnnotationPrefix = "target.workload.openshift.io/" + // podWorkloadAnnotationEffect contains the effect key for the workload annotation value + podWorkloadAnnotationEffect = "effect" + // workloadEffectPreferredDuringScheduling contains the PreferredDuringScheduling effect value + workloadEffectPreferredDuringScheduling = "PreferredDuringScheduling" + // containerResourcesAnnotationPrefix contains resource annotation prefix that will be used by CRI-O to set cpu shares + containerResourcesAnnotationPrefix = "resources.workload.openshift.io/" + // containerResourcesAnnotationValueKeyCPUShares contains resource annotation value cpushares key + containerResourcesAnnotationValueKeyCPUShares = "cpushares" + // namespaceAllowedAnnotation contains the namespace allowed annotation key + namespaceAllowedAnnotation = "workload.openshift.io/allowed" + // workloadAdmissionWarning contains the admission warning annotation key + workloadAdmissionWarning = "workload.openshift.io/warning" + // infraClusterName contains the name of the cluster infrastructure resource + infraClusterName = "cluster" + // debugSourceResourceAnnotation contains the debug annotation that refers to the pod resource + debugSourceResourceAnnotation = "debug.openshift.io/source-resource" +) + +var _ = initializer.WantsExternalKubeInformerFactory(&managementCPUsOverride{}) +var _ = initializer.WantsExternalKubeClientSet(&managementCPUsOverride{}) +var _ = admission.MutationInterface(&managementCPUsOverride{}) +var _ = 
admission.ValidationInterface(&managementCPUsOverride{}) +var _ = WantsInfraInformer(&managementCPUsOverride{}) + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(config io.Reader) (admission.Interface, error) { + return &managementCPUsOverride{ + Handler: admission.NewHandler(admission.Create), + }, nil + }) +} + +// managementCPUsOverride presents admission plugin that should replace pod container CPU requests with a new management resource. +// It applies to all pods that: +// 1. are in an allowed namespace +// 2. and have the workload annotation. +// +// It also sets the new management resource request and limit and set resource annotation that CRI-O can +// recognize and apply the relevant changes. +// For more information, see - https://github.com/openshift/enhancements/pull/703 +// +// Conditions for CPUs requests deletion: +// 1. The namespace should have allowed annotation "workload.openshift.io/allowed": "management" +// 2. The pod should have management annotation: "workload.openshift.io/management": "{"effect": "PreferredDuringScheduling"}" +// 3. All nodes under the cluster should have new management resource - "management.workload.openshift.io/cores" +// 4. The CPU request deletion will not change the pod QoS class +type managementCPUsOverride struct { + *admission.Handler + client kubernetes.Interface + nsLister corev1listers.NamespaceLister + nsListerSynced func() bool + nodeLister corev1listers.NodeLister + nodeListSynced func() bool + infraConfigLister configv1listers.InfrastructureLister + infraConfigListSynced func() bool +} + +func (a *managementCPUsOverride) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + a.nsLister = kubeInformers.Core().V1().Namespaces().Lister() + a.nsListerSynced = kubeInformers.Core().V1().Namespaces().Informer().HasSynced + a.nodeLister = kubeInformers.Core().V1().Nodes().Lister() + a.nodeListSynced = kubeInformers.Core().V1().Nodes().Informer().HasSynced +} + +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. 
+func (a *managementCPUsOverride) SetExternalKubeClientSet(client kubernetes.Interface) { + a.client = client +} + +func (a *managementCPUsOverride) SetInfraInformer(informer configv1informer.InfrastructureInformer) { + a.infraConfigLister = informer.Lister() + a.infraConfigListSynced = informer.Informer().HasSynced +} + +func (a *managementCPUsOverride) ValidateInitialization() error { + if a.client == nil { + return fmt.Errorf("%s plugin needs a kubernetes client", PluginName) + } + if a.nsLister == nil { + return fmt.Errorf("%s did not get a namespace lister", PluginName) + } + if a.nsListerSynced == nil { + return fmt.Errorf("%s plugin needs a namespace lister synced", PluginName) + } + if a.nodeLister == nil { + return fmt.Errorf("%s did not get a node lister", PluginName) + } + if a.nodeListSynced == nil { + return fmt.Errorf("%s plugin needs a node lister synced", PluginName) + } + if a.infraConfigLister == nil { + return fmt.Errorf("%s did not get a config infrastructure lister", PluginName) + } + if a.infraConfigListSynced == nil { + return fmt.Errorf("%s plugin needs a config infrastructure lister synced", PluginName) + } + return nil +} + +func (a *managementCPUsOverride) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + if attr.GetResource().GroupResource() != coreapi.Resource("pods") || attr.GetSubresource() != "" { + return nil + } + + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject())) + } + + // do not mutate mirror pods at all + if isStaticPod(pod.Annotations) { + return nil + } + + podAnnotations := map[string]string{} + for k, v := range pod.Annotations { + podAnnotations[k] = v + } + + // strip any resource annotations specified by a user + stripResourcesAnnotations(pod.Annotations) + // strip any workload annotation to prevent from underlying components(CRI-O, kubelet) to apply any changes + // according to the workload annotation + stripWorkloadAnnotations(pod.Annotations) + + workloadType, err := getWorkloadType(podAnnotations) + if err != nil { + invalidError := getPodInvalidWorkloadAnnotationError(podAnnotations, err.Error()) + return errors.NewInvalid(coreapi.Kind("Pod"), pod.Name, field.ErrorList{invalidError}) + } + + // no workload annotation is specified under the pod + if len(workloadType) == 0 { + return nil + } + + if !a.waitForSyncedStore(time.After(timeToWaitForCacheSync)) { + return admission.NewForbidden(attr, fmt.Errorf("%s node or namespace or infra config cache not synchronized", PluginName)) + } + + nodes, err := a.nodeLister.List(labels.Everything()) + if err != nil { + return admission.NewForbidden(attr, err) // can happen due to informer latency + } + + // we still need to have nodes under the cluster to decide if the management resource enabled or not + if len(nodes) == 0 { + return admission.NewForbidden(attr, fmt.Errorf("%s the cluster does not have any nodes", PluginName)) + } + + clusterInfra, err := a.infraConfigLister.Get(infraClusterName) + if err != nil { + return admission.NewForbidden(attr, err) // can happen due to informer latency + } + + // the infrastructure status is empty, so we can not decide the cluster type + if reflect.DeepEqual(clusterInfra.Status, configv1.InfrastructureStatus{}) { + return admission.NewForbidden(attr, fmt.Errorf("%s infrastructure resource has empty status", PluginName)) + } + + // the infrastructure status is not empty, but topology related fields do not have any 
values indicates that + // the cluster is during the roll-back process to the version that does not support the topology fields + // the upgrade to 4.8 handled by the CR defaulting + if clusterInfra.Status.ControlPlaneTopology == "" && clusterInfra.Status.InfrastructureTopology == "" { + return nil + } + + // Check if we are in CPU Partitioning mode for AllNodes + if !isCPUPartitioning(clusterInfra.Status, nodes, workloadType) { + return nil + } + + // allow annotations on project to override management pods CPUs requests + ns, err := a.getPodNamespace(attr) + if err != nil { + return err + } + + if !doesNamespaceAllowWorkloadType(ns.Annotations, workloadType) { + return admission.NewForbidden(attr, fmt.Errorf("%s the pod namespace %q does not allow the workload type %s", PluginName, ns.Name, workloadType)) + } + + workloadAnnotation := fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadType) + effect, err := getWorkloadAnnotationEffect(podAnnotations[workloadAnnotation]) + if err != nil { + invalidError := getPodInvalidWorkloadAnnotationError(podAnnotations, fmt.Sprintf("failed to get workload annotation effect: %v", err)) + return errors.NewInvalid(coreapi.Kind("Pod"), pod.Name, field.ErrorList{invalidError}) + } + + // TODO: currently we support only PreferredDuringScheduling effect + if effect != workloadEffectPreferredDuringScheduling { + invalidError := getPodInvalidWorkloadAnnotationError(podAnnotations, fmt.Sprintf("only %q effect is supported", workloadEffectPreferredDuringScheduling)) + return errors.NewInvalid(coreapi.Kind("Pod"), pod.Name, field.ErrorList{invalidError}) + } + + allContainers := append([]coreapi.Container{}, pod.Spec.InitContainers...) + allContainers = append(allContainers, pod.Spec.Containers...) + podQoSClass := getPodQoSClass(allContainers) + + // we do not want to change guaranteed pods resource allocation, because it should be managed by + // relevant managers(CPU and memory) under the kubelet + if podQoSClass == coreapi.PodQOSGuaranteed { + pod.Annotations[workloadAdmissionWarning] = "skip pod CPUs requests modifications because it has guaranteed QoS class" + return nil + } + + // we should skip mutation of the pod that has container with both CPU limit and request because once we will remove + // the request, the defaulter will set the request back with the CPU limit value + if podHasBothCPULimitAndRequest(allContainers) { + pod.Annotations[workloadAdmissionWarning] = "skip pod CPUs requests modifications because pod container has both CPU limit and request" + return nil + } + + // before we update the pod available under admission attributes, we need to verify that deletion of the CPU request + // will not change the pod QoS class, otherwise skip pod mutation + // 1. Copy the pod + // 2. Delete CPUs requests for all containers under the pod + // 3. Get modified pod QoS class + // 4. Verify that the pod QoS class before and after the modification stay the same + // 5. Update the pod under admission attributes + podCopy := pod.DeepCopy() + updatePodResources(podCopy, workloadType, podQoSClass) + + allContainersCopy := append([]coreapi.Container{}, podCopy.Spec.InitContainers...) + allContainersCopy = append(allContainersCopy, podCopy.Spec.Containers...) 
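+	// Example (hypothetical values): a burstable container requesting {cpu: 500m, memory: 100Mi}
+	// stays burstable after its CPU request is stripped, so the pod is mutated; a container whose
+	// only resource is a CPU request would become best-effort, so the pod is skipped and only a
+	// warning annotation is added.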
+ podQoSClassAfterModification := getPodQoSClass(allContainersCopy) + + if podQoSClass != podQoSClassAfterModification { + pod.Annotations[workloadAdmissionWarning] = fmt.Sprintf("skip pod CPUs requests modifications because it will change the pod QoS class from %s to %s", podQoSClass, podQoSClassAfterModification) + return nil + } + + updatePodResources(pod, workloadType, podQoSClass) + + return nil +} + +func isCPUPartitioning(infraStatus configv1.InfrastructureStatus, nodes []*corev1.Node, workloadType string) bool { + // If status is not for CPU partitioning and we're single node we also check nodes to support upgrade event + // TODO: This should not be needed after 4.13 as all clusters after should have this feature on at install time, or updated by migration in NTO. + if infraStatus.CPUPartitioning != configv1.CPUPartitioningAllNodes && infraStatus.ControlPlaneTopology == configv1.SingleReplicaTopologyMode { + managedResource := fmt.Sprintf("%s.%s", workloadType, containerWorkloadResourceSuffix) + for _, node := range nodes { + // We only expect a single node to exist, so we return on first hit + if _, ok := node.Status.Allocatable[corev1.ResourceName(managedResource)]; ok { + return true + } + } + } + return infraStatus.CPUPartitioning == configv1.CPUPartitioningAllNodes +} + +func (a *managementCPUsOverride) getPodNamespace(attr admission.Attributes) (*corev1.Namespace, error) { + ns, err := a.nsLister.Get(attr.GetNamespace()) + if err == nil { + return ns, nil + } + + if !errors.IsNotFound(err) { + return nil, admission.NewForbidden(attr, err) + } + + // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not + ns, err = a.client.CoreV1().Namespaces().Get(context.TODO(), attr.GetNamespace(), metav1.GetOptions{}) + if err == nil { + return ns, nil + } + + if !errors.IsNotFound(err) { + return nil, admission.NewForbidden(attr, err) + } + + return nil, err +} + +func (a *managementCPUsOverride) waitForSyncedStore(timeout <-chan time.Time) bool { + for !a.nsListerSynced() || !a.nodeListSynced() || !a.infraConfigListSynced() { + select { + case <-time.After(100 * time.Millisecond): + case <-timeout: + return a.nsListerSynced() && a.nodeListSynced() && a.infraConfigListSynced() + } + } + + return true +} + +func updatePodResources(pod *coreapi.Pod, workloadType string, class coreapi.PodQOSClass) { + if pod.Annotations == nil { + pod.Annotations = map[string]string{} + } + + // update init containers resources + updateContainersResources(pod.Spec.InitContainers, pod.Annotations, workloadType, class) + + // update app containers resources + updateContainersResources(pod.Spec.Containers, pod.Annotations, workloadType, class) + + // re-add workload annotation + addWorkloadAnnotations(pod.Annotations, workloadType) +} + +func updateContainersResources(containers []coreapi.Container, podAnnotations map[string]string, workloadType string, podQoSClass coreapi.PodQOSClass) { + for i := range containers { + c := &containers[i] + cpusharesAnnotationKey := fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, c.Name) + + // make sure best effort is always 2 shares, it the minimal shares that supported + // see - https://github.com/kubernetes/kubernetes/blob/46563b0abebbb00e21db967950a1343e83a0c6a2/pkg/kubelet/cm/qos_container_manager_linux.go#L99 + if podQoSClass == coreapi.PodQOSBestEffort { + podAnnotations[cpusharesAnnotationKey] = fmt.Sprintf(`{"%s": 2}`, containerResourcesAnnotationValueKeyCPUShares) + continue + } + + if c.Resources.Requests != 
nil { + if _, ok := c.Resources.Requests[coreapi.ResourceCPU]; !ok { + continue + } + + cpuRequest := c.Resources.Requests[coreapi.ResourceCPU] + cpuRequestInMilli := cpuRequest.MilliValue() + + cpuShares := cm.MilliCPUToShares(cpuRequestInMilli) + podAnnotations[cpusharesAnnotationKey] = fmt.Sprintf(`{"%s": %d}`, containerResourcesAnnotationValueKeyCPUShares, cpuShares) + delete(c.Resources.Requests, coreapi.ResourceCPU) + + if c.Resources.Limits == nil { + c.Resources.Limits = coreapi.ResourceList{} + } + + // multiply the CPU request by 1000, to make sure that the resource will pass integer validation + managedResource := fmt.Sprintf("%s.%s", workloadType, containerWorkloadResourceSuffix) + newCPURequest := resource.NewMilliQuantity(cpuRequestInMilli*1000, cpuRequest.Format) + c.Resources.Requests[coreapi.ResourceName(managedResource)] = *newCPURequest + c.Resources.Limits[coreapi.ResourceName(managedResource)] = *newCPURequest + } + } +} + +func isGuaranteed(containers []coreapi.Container) bool { + for _, c := range containers { + // only memory and CPU resources are relevant to decide pod QoS class + for _, r := range []coreapi.ResourceName{coreapi.ResourceMemory, coreapi.ResourceCPU} { + limit := c.Resources.Limits[r] + request, requestExist := c.Resources.Requests[r] + + if limit.IsZero() { + return false + } + + if !requestExist { + continue + } + + // it some corner case, when you set CPU request to 0 the k8s will change it to the value + // specified under the limit + if r == coreapi.ResourceCPU && request.IsZero() { + continue + } + + if !limit.Equal(request) { + return false + } + } + } + + return true +} + +func isBestEffort(containers []coreapi.Container) bool { + for _, c := range containers { + // only memory and CPU resources are relevant to decide pod QoS class + for _, r := range []coreapi.ResourceName{coreapi.ResourceMemory, coreapi.ResourceCPU} { + limit := c.Resources.Limits[r] + request := c.Resources.Requests[r] + + if !limit.IsZero() || !request.IsZero() { + return false + } + } + } + + return true +} + +func getPodQoSClass(containers []coreapi.Container) coreapi.PodQOSClass { + if isGuaranteed(containers) { + return coreapi.PodQOSGuaranteed + } + + if isBestEffort(containers) { + return coreapi.PodQOSBestEffort + } + + return coreapi.PodQOSBurstable +} + +func podHasBothCPULimitAndRequest(containers []coreapi.Container) bool { + for _, c := range containers { + _, cpuRequestExists := c.Resources.Requests[coreapi.ResourceCPU] + _, cpuLimitExists := c.Resources.Limits[coreapi.ResourceCPU] + + if cpuRequestExists && cpuLimitExists { + return true + } + } + + return false +} + +func doesNamespaceAllowWorkloadType(annotations map[string]string, workloadType string) bool { + v, found := annotations[namespaceAllowedAnnotation] + if !found { + return false + } + + for _, t := range strings.Split(v, ",") { + if workloadType == t { + return true + } + } + + return false +} + +func getWorkloadType(annotations map[string]string) (string, error) { + var workloadAnnotationsKeys []string + for k := range annotations { + if strings.HasPrefix(k, podWorkloadTargetAnnotationPrefix) { + workloadAnnotationsKeys = append(workloadAnnotationsKeys, k) + } + } + + // no workload annotation is specified under the pod + if len(workloadAnnotationsKeys) == 0 { + return "", nil + } + + // more than one workload annotation exists under the pod and we do not support different workload types + // under the same pod + if len(workloadAnnotationsKeys) > 1 { + return "", fmt.Errorf("the pod can not 
have more than one workload annotations") + } + + workloadType := strings.TrimPrefix(workloadAnnotationsKeys[0], podWorkloadTargetAnnotationPrefix) + if len(workloadType) == 0 { + return "", fmt.Errorf("the workload annotation key should have format %s, when is non empty string", podWorkloadTargetAnnotationPrefix) + } + + return workloadType, nil +} + +func getWorkloadAnnotationEffect(workloadAnnotationKey string) (string, error) { + managementAnnotationValue := map[string]string{} + if err := json.Unmarshal([]byte(workloadAnnotationKey), &managementAnnotationValue); err != nil { + return "", fmt.Errorf("failed to parse %q annotation value: %v", workloadAnnotationKey, err) + } + + if len(managementAnnotationValue) > 1 { + return "", fmt.Errorf("the workload annotation value %q has more than one key", managementAnnotationValue) + } + + effect, ok := managementAnnotationValue[podWorkloadAnnotationEffect] + if !ok { + return "", fmt.Errorf("the workload annotation value %q does not have %q key", managementAnnotationValue, podWorkloadAnnotationEffect) + } + return effect, nil +} + +func stripResourcesAnnotations(annotations map[string]string) { + for k := range annotations { + if strings.HasPrefix(k, containerResourcesAnnotationPrefix) { + delete(annotations, k) + } + } +} + +func stripWorkloadAnnotations(annotations map[string]string) { + for k := range annotations { + if strings.HasPrefix(k, podWorkloadTargetAnnotationPrefix) { + delete(annotations, k) + } + } +} + +func addWorkloadAnnotations(annotations map[string]string, workloadType string) { + if annotations == nil { + annotations = map[string]string{} + } + + workloadAnnotation := fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadType) + annotations[workloadAnnotation] = fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling) +} + +func (a *managementCPUsOverride) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) (err error) { + if attr.GetResource().GroupResource() != coreapi.Resource("pods") || attr.GetSubresource() != "" { + return nil + } + + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject())) + } + + // do not validate mirror pods at all + if isStaticPod(pod.Annotations) { + return nil + } + + ns, err := a.getPodNamespace(attr) + if err != nil { + return err + } + + var allErrs field.ErrorList + workloadType, err := getWorkloadType(pod.Annotations) + if err != nil { + allErrs = append(allErrs, getPodInvalidWorkloadAnnotationError(pod.Annotations, err.Error())) + } + + workloadResourceAnnotations := map[string]map[string]int{} + for k, v := range pod.Annotations { + if !strings.HasPrefix(k, containerResourcesAnnotationPrefix) { + continue + } + + resourceAnnotationValue := map[string]int{} + if err := json.Unmarshal([]byte(v), &resourceAnnotationValue); err != nil { + allErrs = append(allErrs, getPodInvalidWorkloadAnnotationError(pod.Annotations, err.Error())) + } + workloadResourceAnnotations[k] = resourceAnnotationValue + } + + containersWorkloadResources := map[string]*coreapi.Container{} + allContainers := append([]coreapi.Container{}, pod.Spec.InitContainers...) + allContainers = append(allContainers, pod.Spec.Containers...) 
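+	// collect every container that already requests a *.workload.openshift.io/cores resource so that
+	// pods carrying such resources without a matching workload annotation can be rejected below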
+ for i := range allContainers { + c := &allContainers[i] + // we are interested only in requests because only the request affects the scheduler + for r := range c.Resources.Requests { + resourceName := string(r) + if strings.HasSuffix(resourceName, containerWorkloadResourceSuffix) { + containersWorkloadResources[resourceName] = c + } + } + } + + // the pod does not have a workload annotation + if len(workloadType) == 0 { + if len(workloadResourceAnnotations) > 0 { + allErrs = append(allErrs, getPodInvalidWorkloadAnnotationError(pod.Annotations, "the pod without workload annotation can not have resource annotation")) + } + + for resourceName, c := range containersWorkloadResources { + if isDebugPod(pod.Annotations) { + warning.AddWarning(ctx, "", "You must pass --keep-annotations parameter to the debug command or upgrade the oc tool to the latest version when trying to debug a pod with workload partitioning resources.") + } + + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.containers.resources.requests"), c.Resources.Requests, fmt.Sprintf("the pod without workload annotations can not have containers with workload resources %q", resourceName))) + } + } else { + if !doesNamespaceAllowWorkloadType(ns.Annotations, workloadType) { // the pod has a workload annotation, but the namespace does not allow that workload type + allErrs = append(allErrs, getPodInvalidWorkloadAnnotationError(pod.Annotations, fmt.Sprintf("the pod can not have workload annotation, when the namespace %q does not allow it", ns.Name))) + } + + for _, v := range workloadResourceAnnotations { + if len(v) > 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("metadata.annotations"), pod.Annotations, "the pod resource annotation value can not have more than one key")) + } + + // the pod should not have any resource annotation whose value includes keys other than cpushares + if _, ok := v[containerResourcesAnnotationValueKeyCPUShares]; len(v) == 1 && !ok { + allErrs = append(allErrs, field.Invalid(field.NewPath("metadata.annotations"), pod.Annotations, "the pod resource annotation value should have only cpushares key")) + } + } + } + + if len(allErrs) == 0 { + return nil + } + + return errors.NewInvalid(coreapi.Kind("Pod"), pod.Name, allErrs) +} + +func getPodInvalidWorkloadAnnotationError(annotations map[string]string, message string) *field.Error { + return field.Invalid(field.NewPath("metadata.Annotations"), annotations, message) +} + +// isStaticPod returns true if the pod is a static pod. 
+func isStaticPod(annotations map[string]string) bool { + source, ok := annotations[kubetypes.ConfigSourceAnnotationKey] + return ok && source != kubetypes.ApiserverSource +} + +func isDebugPod(annotations map[string]string) bool { + _, ok := annotations[debugSourceResourceAnnotation] + return ok +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission_test.go new file mode 100644 index 0000000000000..114a5ad3865f0 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission_test.go @@ -0,0 +1,683 @@ +package managementcpusoverride + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + configv1 "github.com/openshift/api/config/v1" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + coreapi "k8s.io/kubernetes/pkg/apis/core" + kapi "k8s.io/kubernetes/pkg/apis/core" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" +) + +const ( + // workloadTypeManagement contains the type for the management workload + workloadTypeManagement = "management" + // managedCapacityLabel contains the name of the new management resource that will available under the node + managedCapacityLabel = "management.workload.openshift.io/cores" +) + +func getMockManagementCPUsOverride(namespace *corev1.Namespace, nodes []*corev1.Node, infra *configv1.Infrastructure) (*managementCPUsOverride, error) { + m := &managementCPUsOverride{ + Handler: admission.NewHandler(admission.Create), + client: &fake.Clientset{}, + nsLister: fakeNamespaceLister(namespace), + nsListerSynced: func() bool { return true }, + nodeLister: fakeNodeLister(nodes), + nodeListSynced: func() bool { return true }, + infraConfigLister: fakeInfraConfigLister(infra), + infraConfigListSynced: func() bool { return true }, + } + if err := m.ValidateInitialization(); err != nil { + return nil, err + } + + return m, nil +} + +func fakeNamespaceLister(ns *corev1.Namespace) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + _ = indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func fakeNodeLister(nodes []*corev1.Node) corev1listers.NodeLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, node := range nodes { + _ = indexer.Add(node) + } + return corev1listers.NewNodeLister(indexer) +} + +func fakeInfraConfigLister(infra *configv1.Infrastructure) configv1listers.InfrastructureLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if infra != nil { + _ = indexer.Add(infra) + } + return configv1listers.NewInfrastructureLister(indexer) +} + +func TestAdmit(t *testing.T) { + tests := []struct { + name string + pod *kapi.Pod + namespace *corev1.Namespace + nodes []*corev1.Node + infra *configv1.Infrastructure + expectedCpuRequest resource.Quantity + expectedAnnotations map[string]string + expectedError error + }{ + { + name: "should return admission error when the pod namespace does not allow the workload type", + pod: testManagedPod("500m", "250m", 
"500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf("the pod namespace %q does not allow the workload type management", "namespace"), + }, + { + name: "should ignore pods that do not have managed annotation", + pod: testPod("500m", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + }, + { + name: "should return admission error when the pod has more than one workload annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): "", + fmt.Sprintf("%stest", podWorkloadTargetAnnotationPrefix): "", + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf("the pod can not have more than one workload annotations"), + }, + { + name: "should return admission error when the pod has incorrect workload annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + podWorkloadTargetAnnotationPrefix: "", + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf("the workload annotation key should have format %s", podWorkloadTargetAnnotationPrefix), + }, + { + name: "should return admission error when the pod has incorrect workload annotation effect", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): "{", + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf(`failed to get workload annotation effect: failed to parse "{" annotation value: unexpected end of JSON input`), + }, + { + name: "should return admission error when the pod has workload annotation without effect value", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): `{"test": "test"}`, + }, + ), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf(`failed to get workload annotation effect: the workload annotation value map["test":"test"] does not have "effect" key`), + infra: testClusterSNOInfra(), + }, + { + name: "should delete CPU requests and update workload CPU annotations for the burstable pod with managed annotation", + pod: testManagedPod("", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.Quantity{}, + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s": 256}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "initTest"): fmt.Sprintf(`{"%s": 
256}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should update workload CPU annotations for the best-effort pod with managed annotation", + pod: testManagedPod("", "", "", ""), + expectedCpuRequest: resource.Quantity{}, + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s": 2}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "initTest"): fmt.Sprintf(`{"%s": 2}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should skip static pod mutation", + pod: testManagedStaticPod("500m", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource, + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should ignore guaranteed pod", + pod: testManagedPod("500m", "500m", "500Mi", "500Mi"), + expectedCpuRequest: resource.MustParse("500m"), + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + workloadAdmissionWarning: "skip pod CPUs requests modifications because it has guaranteed QoS class", + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should ignore pod when one of pod containers have both CPU limit and request", + pod: testManagedPod("500m", "250m", "500Mi", ""), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + workloadAdmissionWarning: fmt.Sprintf("skip pod CPUs requests modifications because pod container has both CPU limit and request"), + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should ignore pod when removing the CPU request will change the pod QoS class to best-effort", + pod: testManagedPod("", "250m", "", ""), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + expectedAnnotations: map[string]string{ + workloadAdmissionWarning: fmt.Sprintf("skip pod CPUs requests modifications because it will change the pod QoS class from %s to %s", corev1.PodQOSBurstable, corev1.PodQOSBestEffort), + }, + nodes: []*corev1.Node{testNodeWithManagementResource()}, + infra: testClusterSNOInfra(), + }, + { + name: "should not mutate the pod when at least one node does not have management resources", + pod: testManagedPod("500m", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNode()}, + infra: 
testClusterSNOInfra(), + }, + { + name: "should return admission error when the cluster does not have any nodes", + pod: testManagedPod("500m", "250m", "500Mi", "250Mi"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{}, + infra: testClusterSNOInfra(), + expectedError: fmt.Errorf("the cluster does not have any nodes"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + m, err := getMockManagementCPUsOverride(test.namespace, test.nodes, test.infra) + if err != nil { + t.Fatalf("%s: failed to get mock managementCPUsOverride: %v", test.name, err) + } + + test.pod.Namespace = test.namespace.Name + + attrs := admission.NewAttributesRecord(test.pod, nil, schema.GroupVersionKind{}, test.pod.Namespace, test.pod.Name, kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + err = m.Admit(context.TODO(), attrs, nil) + if err != nil { + if test.expectedError == nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !strings.Contains(err.Error(), test.expectedError.Error()) { + t.Fatalf("%s: the expected error %v, got %v", test.name, test.expectedError, err) + } + } + + if err == nil && test.expectedError != nil { + t.Fatalf("%s: the expected error %v, got nil", test.name, test.expectedError) + } + + if test.expectedAnnotations != nil && !reflect.DeepEqual(test.expectedAnnotations, test.pod.Annotations) { + t.Fatalf("%s: the pod annotations do not match; %v should be %v", test.name, test.pod.Annotations, test.expectedAnnotations) + } + + resources := test.pod.Spec.InitContainers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Fatalf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + + resources = test.pod.Spec.Containers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Fatalf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + }) + } +} + +func TestGetPodQoSClass(t *testing.T) { + tests := []struct { + name string + pod *kapi.Pod + expectedQoSClass coreapi.PodQOSClass + }{ + { + name: "should recognize best-effort pod", + pod: testManagedPod("", "", "", ""), + expectedQoSClass: coreapi.PodQOSBestEffort, + }, + { + name: "should recognize guaranteed pod", + pod: testManagedPod("100m", "100m", "100Mi", "100Mi"), + expectedQoSClass: coreapi.PodQOSGuaranteed, + }, + { + name: "should recognize guaranteed pod when CPU request equals to 0", + pod: testManagedPod("100m", "0", "100Mi", "100Mi"), + expectedQoSClass: coreapi.PodQOSGuaranteed, + }, + { + name: "should recognize burstable pod with only CPU limit", + pod: testManagedPod("100m", "", "", ""), + expectedQoSClass: coreapi.PodQOSBurstable, + }, + { + name: "should recognize burstable pod with only CPU request", + pod: testManagedPod("", "100m", "", ""), + expectedQoSClass: coreapi.PodQOSBurstable, + }, + { + name: "should recognize burstable pod with only memory limit", + pod: testManagedPod("", "", "100Mi", ""), + expectedQoSClass: coreapi.PodQOSBurstable, + }, + { + name: "should recognize burstable pod with only memory request", + pod: testManagedPod("", "", "", "100Mi"), + expectedQoSClass: coreapi.PodQOSBurstable, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + 
allContainers := append([]coreapi.Container{}, test.pod.Spec.InitContainers...) + allContainers = append(allContainers, test.pod.Spec.Containers...) + qosClass := getPodQoSClass(allContainers) + if qosClass != test.expectedQoSClass { + t.Fatalf("%s: pod has QoS class %s; should be %s", test.name, qosClass, test.expectedQoSClass) + } + }) + } +} + +func TestValidate(t *testing.T) { + tests := []struct { + name string + pod *kapi.Pod + namespace *corev1.Namespace + nodes []*corev1.Node + expectedError error + }{ + { + name: "should return invalid error when the pod has more than one workload annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): "", + fmt.Sprintf("%stest", podWorkloadTargetAnnotationPrefix): "", + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the pod can not have more than one workload annotations"), + }, + { + name: "should return invalid error when the pod has incorrect workload annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + podWorkloadTargetAnnotationPrefix: "", + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the workload annotation key should have format %s", podWorkloadTargetAnnotationPrefix), + }, + { + name: "should return invalid error when the pod has cpuset resource annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): `{"cpuset": 1}`, + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("he pod resource annotation value should have only cpushares key"), + }, + { + name: "should return invalid error when the pod does not have workload annotation, but has resource annotation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s": 2}`, containerResourcesAnnotationValueKeyCPUShares), + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the pod without workload annotation can not have resource annotation"), + }, + { + name: "should return invalid error when the pod does not have workload annotation, but the container has management resource", + pod: testPodWithManagedResource( + "500m", + "250m", + "500Mi", + "250Mi", + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: fmt.Errorf("the pod without workload annotations can not have containers with workload resources %q", "management.workload.openshift.io/cores"), + }, + { + name: "should return invalid error when the pod has workload annotation, but the pod namespace does not have allowed annotation", + pod: testManagedPod( + "500m", + "250m", + "500Mi", + "250Mi", + ), + namespace: testNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + expectedError: 
fmt.Errorf("the pod can not have workload annotation, when the namespace %q does not allow it", "namespace"), + }, + { + name: "should not return any errors when the pod and namespace valid", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "test"): fmt.Sprintf(`{"%s": 256}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, "initTest"): fmt.Sprintf(`{"%s": 256}`, containerResourcesAnnotationValueKeyCPUShares), + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + }, + { + name: "should skip static pod validation", + pod: testManagedPodWithAnnotations( + "500m", + "250m", + "500Mi", + "250Mi", + map[string]string{ + fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement): "", + fmt.Sprintf("%stest", podWorkloadTargetAnnotationPrefix): "", + kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource, + }, + ), + namespace: testManagedNamespace(), + nodes: []*corev1.Node{testNodeWithManagementResource()}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + m, err := getMockManagementCPUsOverride(test.namespace, test.nodes, nil) + if err != nil { + t.Fatalf("%s: failed to get mock managementCPUsOverride: %v", test.name, err) + } + test.pod.Namespace = test.namespace.Name + + attrs := admission.NewAttributesRecord(test.pod, nil, schema.GroupVersionKind{}, test.pod.Namespace, test.pod.Name, kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + err = m.Validate(context.TODO(), attrs, nil) + if err != nil { + if test.expectedError == nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !strings.Contains(err.Error(), test.expectedError.Error()) { + t.Fatalf("%s: the expected error %v, got %v", test.name, test.expectedError, err) + } + } + + if err == nil && test.expectedError != nil { + t.Fatalf("%s: the expected error %v, got nil", test.name, test.expectedError) + } + }) + } +} + +func testPodWithManagedResource(cpuLimit, cpuRequest, memoryLimit, memoryRequest string) *kapi.Pod { + pod := testPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest) + + managedResourceName := fmt.Sprintf("%s.%s", workloadTypeManagement, containerWorkloadResourceSuffix) + + managedResourceQuantity := resource.MustParse("26") + pod.Spec.Containers[0].Resources.Requests[kapi.ResourceName(managedResourceName)] = managedResourceQuantity + return pod +} + +func testManagedPodWithAnnotations(cpuLimit, cpuRequest, memoryLimit, memoryRequest string, annotations map[string]string) *kapi.Pod { + pod := testManagedPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest) + pod.Annotations = annotations + return pod +} + +func testManagedStaticPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest string) *kapi.Pod { + pod := testManagedPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest) + pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource + return pod +} + +func testManagedPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest string) *kapi.Pod { + pod := testPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest) + + pod.Annotations = map[string]string{} + for _, c := range 
pod.Spec.InitContainers { + cpusetAnnotation := fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, c.Name) + pod.Annotations[cpusetAnnotation] = `{"cpuset": "0-1"}` + } + for _, c := range pod.Spec.Containers { + cpusetAnnotation := fmt.Sprintf("%s%s", containerResourcesAnnotationPrefix, c.Name) + pod.Annotations[cpusetAnnotation] = `{"cpuset": "0-1"}` + } + + managementWorkloadAnnotation := fmt.Sprintf("%s%s", podWorkloadTargetAnnotationPrefix, workloadTypeManagement) + pod.Annotations = map[string]string{ + managementWorkloadAnnotation: fmt.Sprintf(`{"%s":"%s"}`, podWorkloadAnnotationEffect, workloadEffectPreferredDuringScheduling), + } + + return pod +} + +func testPod(cpuLimit, cpuRequest, memoryLimit, memoryRequest string) *kapi.Pod { + pod := &kapi.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Name: "initTest", + }, + }, + Containers: []kapi.Container{ + { + Name: "test", + }, + }, + }, + } + + var limits kapi.ResourceList + // we need this kind of statement to verify assignment to entry in nil map + if cpuLimit != "" || memoryLimit != "" { + limits = kapi.ResourceList{} + if cpuLimit != "" { + limits[kapi.ResourceCPU] = resource.MustParse(cpuLimit) + } + + if memoryLimit != "" { + limits[kapi.ResourceMemory] = resource.MustParse(memoryLimit) + } + + pod.Spec.InitContainers[0].Resources.Limits = limits.DeepCopy() + pod.Spec.Containers[0].Resources.Limits = limits.DeepCopy() + } + + var requests kapi.ResourceList + // we need this kind of statement to verify assignment to entry in nil map + if cpuRequest != "" || memoryRequest != "" { + requests = kapi.ResourceList{} + if cpuRequest != "" { + requests[kapi.ResourceCPU] = resource.MustParse(cpuRequest) + } + if memoryRequest != "" { + requests[kapi.ResourceMemory] = resource.MustParse(memoryRequest) + } + + pod.Spec.InitContainers[0].Resources.Requests = requests.DeepCopy() + pod.Spec.Containers[0].Resources.Requests = requests.DeepCopy() + } + + return pod +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +func testNamespace() *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + } +} + +func testManagedNamespace() *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "managed-namespace", + Annotations: map[string]string{ + namespaceAllowedAnnotation: fmt.Sprintf("%s,test", workloadTypeManagement), + }, + }, + } +} + +func testNode() *corev1.Node { + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + } +} + +func testNodeWithManagementResource() *corev1.Node { + q := resource.NewQuantity(16000, resource.DecimalSI) + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "managed-node", + }, + Status: corev1.NodeStatus{ + Allocatable: corev1.ResourceList{ + managedCapacityLabel: *q, + }, + }, + } +} + +func testClusterInfraWithoutAnyStatusFields() *configv1.Infrastructure { + return &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: infraClusterName, + }, + } +} + +func testClusterSNOInfra() *configv1.Infrastructure { + return &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: infraClusterName, + }, + Status: configv1.InfrastructureStatus{ + APIServerURL: "test", + ControlPlaneTopology: configv1.SingleReplicaTopologyMode, + InfrastructureTopology: configv1.SingleReplicaTopologyMode, + CPUPartitioning: configv1.CPUPartitioningAllNodes, + }, + 
} +} + +func testClusterInfraWithoutTopologyFields() *configv1.Infrastructure { + infra := testClusterSNOInfra() + infra.Status.ControlPlaneTopology = "" + infra.Status.InfrastructureTopology = "" + return infra +} diff --git a/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/doc.go b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/doc.go new file mode 100644 index 0000000000000..bcd9c74ec4723 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/doc.go @@ -0,0 +1,16 @@ +package managementcpusoverride + +// The ManagementCPUOverride admission plugin replaces pod container CPU requests with a new management resource. +// It applies to all pods that: +// 1. are in an allowed namespace +// 2. and have the workload annotation. +// +// It also sets the new management resource request and limit and set resource annotation that CRI-O can +// recognize and apply the relevant changes. +// For more information, see - https://github.com/openshift/enhancements/pull/703 +// +// Conditions for CPUs requests deletion: +// 1. The namespace should have allowed annotation "workload.openshift.io/allowed": "management" +// 2. The pod should have management annotation: "workload.openshift.io/management": "{"effect": "PreferredDuringScheduling"}" +// 3. All nodes under the cluster should have new management resource - "management.workload.openshift.io/cores" +// 4. The CPU request deletion will not change the pod QoS class diff --git a/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/initializers.go b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/initializers.go new file mode 100644 index 0000000000000..02fcd69ebbcb4 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/initializers.go @@ -0,0 +1,28 @@ +package managementcpusoverride + +import ( + "k8s.io/apiserver/pkg/admission" + + configv1informer "github.com/openshift/client-go/config/informers/externalversions/config/v1" +) + +func NewInitializer(infraInformer configv1informer.InfrastructureInformer) admission.PluginInitializer { + return &localInitializer{infraInformer: infraInformer} +} + +type WantsInfraInformer interface { + SetInfraInformer(informer configv1informer.InfrastructureInformer) + admission.InitializationValidator +} + +type localInitializer struct { + infraInformer configv1informer.InfrastructureInformer +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsInfraInformer); ok { + wants.SetInfraInformer(i.infraInformer) + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go new file mode 100644 index 0000000000000..9326205f9b333 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go @@ -0,0 +1,148 @@ +package runonceduration + +import ( + "context" + "errors" + "fmt" + "io" + "strconv" + + "k8s.io/klog/v2" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/utils/integer" + + "github.com/openshift/library-go/pkg/config/helpers" + corev1listers "k8s.io/client-go/listers/core/v1" + 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation" +) + +func Register(plugins *admission.Plugins) { + plugins.Register("autoscaling.openshift.io/RunOnceDuration", + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", "autoscaling.openshift.io/RunOnceDuration") + return nil, nil + } + return NewRunOnceDuration(pluginConfig), nil + }) +} + +func readConfig(reader io.Reader) (*runonceduration.RunOnceDurationConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, runonceduration.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*runonceduration.RunOnceDurationConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object %#v", obj) + } + errs := validation.ValidateRunOnceDurationConfig(config) + if len(errs) > 0 { + return nil, errs.ToAggregate() + } + return config, nil +} + +// NewRunOnceDuration creates a new RunOnceDuration admission plugin +func NewRunOnceDuration(config *runonceduration.RunOnceDurationConfig) admission.Interface { + return &runOnceDuration{ + Handler: admission.NewHandler(admission.Create), + config: config, + } +} + +type runOnceDuration struct { + *admission.Handler + config *runonceduration.RunOnceDurationConfig + nsLister corev1listers.NamespaceLister +} + +var _ = initializer.WantsExternalKubeInformerFactory(&runOnceDuration{}) + +func (a *runOnceDuration) Admit(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + switch { + case a.config == nil, + attributes.GetResource().GroupResource() != kapi.Resource("pods"), + len(attributes.GetSubresource()) > 0: + return nil + } + pod, ok := attributes.GetObject().(*kapi.Pod) + if !ok { + return admission.NewForbidden(attributes, fmt.Errorf("unexpected object: %#v", attributes.GetObject())) + } + + // Only update pods with a restart policy of Never or OnFailure + switch pod.Spec.RestartPolicy { + case kapi.RestartPolicyNever, + kapi.RestartPolicyOnFailure: + // continue + default: + return nil + } + + appliedProjectLimit, err := a.applyProjectAnnotationLimit(attributes.GetNamespace(), pod) + if err != nil { + return admission.NewForbidden(attributes, err) + } + + if !appliedProjectLimit && a.config.ActiveDeadlineSecondsOverride != nil { + pod.Spec.ActiveDeadlineSeconds = int64MinP(a.config.ActiveDeadlineSecondsOverride, pod.Spec.ActiveDeadlineSeconds) + } + return nil +} + +func (a *runOnceDuration) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + a.nsLister = kubeInformers.Core().V1().Namespaces().Lister() +} + +func (a *runOnceDuration) ValidateInitialization() error { + if a.nsLister == nil { + return errors.New("autoscaling.openshift.io/RunOnceDuration plugin requires a namespace listers") + } + return nil +} + +func (a *runOnceDuration) applyProjectAnnotationLimit(namespace string, pod *kapi.Pod) (bool, error) { + ns, err := a.nsLister.Get(namespace) + if err != nil { + return false, fmt.Errorf("error looking up pod namespace: %v", err) + } + if ns.Annotations == nil { + return false, nil + } + limit, hasLimit := 
ns.Annotations[runonceduration.ActiveDeadlineSecondsLimitAnnotation] + if !hasLimit { + return false, nil + } + limitInt64, err := strconv.ParseInt(limit, 10, 64) + if err != nil { + return false, fmt.Errorf("cannot parse the ActiveDeadlineSeconds limit (%s) for project %s: %v", limit, ns.Name, err) + } + pod.Spec.ActiveDeadlineSeconds = int64MinP(&limitInt64, pod.Spec.ActiveDeadlineSeconds) + return true, nil +} + +func int64MinP(a, b *int64) *int64 { + switch { + case a == nil: + return b + case b == nil: + return a + default: + c := integer.Int64Min(*a, *b) + return &c + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go new file mode 100644 index 0000000000000..856d32801bfbb --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go @@ -0,0 +1,215 @@ +package runonceduration + +import ( + "bytes" + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apiserver/pkg/admission" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + kapi "k8s.io/kubernetes/pkg/apis/core" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func fakeNamespaceLister(projectAnnotations map[string]string) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + ns := &corev1.Namespace{} + ns.Name = "default" + ns.Annotations = projectAnnotations + indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func testConfig(n *int64) *runonceduration.RunOnceDurationConfig { + return &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: n, + } +} + +func testRunOncePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyNever + return pod +} + +func testRestartOnFailurePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyOnFailure + return pod +} + +func testRunOncePodWithDuration(n int64) *kapi.Pod { + pod := testRunOncePod() + pod.Spec.ActiveDeadlineSeconds = &n + return pod +} + +func testRestartAlwaysPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyAlways + return pod +} + +func int64p(n int64) *int64 { + return &n +} + +func TestRunOnceDurationAdmit(t *testing.T) { + tests := []struct { + name string + config *runonceduration.RunOnceDurationConfig + pod *kapi.Pod + projectAnnotations map[string]string + expectedActiveDeadlineSeconds *int64 + }{ + { + name: "expect globally configured duration to be set", + config: testConfig(int64p(10)), + pod: testRunOncePod(), + expectedActiveDeadlineSeconds: int64p(10), + }, + { + name: "empty config, no duration to be set", + config: testConfig(nil), + pod: testRunOncePod(), + expectedActiveDeadlineSeconds: nil, + }, + { + name: "expect configured duration to not limit lower existing duration", + config: testConfig(int64p(10)), + pod: testRunOncePodWithDuration(5), + expectedActiveDeadlineSeconds: int64p(5), + }, + { + name: "expect empty config to not limit existing duration", + config: testConfig(nil), + pod: testRunOncePodWithDuration(5), + expectedActiveDeadlineSeconds: int64p(5), + }, + { + name: "expect project limit to be used with nil global value", + config: testConfig(nil), + pod: testRunOncePodWithDuration(2000), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + 
expectedActiveDeadlineSeconds: int64p(1000), + }, + { + name: "expect project limit to not limit a smaller set value", + config: testConfig(nil), + pod: testRunOncePodWithDuration(10), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + expectedActiveDeadlineSeconds: int64p(10), + }, + { + name: "expect project limit to have priority over global config value", + config: testConfig(int64p(10)), + pod: testRunOncePodWithDuration(2000), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + expectedActiveDeadlineSeconds: int64p(1000), + }, + { + name: "make no change to a pod that is not a run-once pod", + config: testConfig(int64p(10)), + pod: testRestartAlwaysPod(), + expectedActiveDeadlineSeconds: nil, + }, + { + name: "update a pod that has a RestartOnFailure policy", + config: testConfig(int64p(10)), + pod: testRestartOnFailurePod(), + expectedActiveDeadlineSeconds: int64p(10), + }, + } + + for _, tc := range tests { + admissionPlugin := NewRunOnceDuration(tc.config) + admissionPlugin.(*runOnceDuration).nsLister = fakeNamespaceLister(tc.projectAnnotations) + pod := tc.pod + attrs := admission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "default", "test", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, nil) + if err := admissionPlugin.(admission.MutationInterface).Admit(context.TODO(), attrs, nil); err != nil { + t.Errorf("%s: unexpected mutating admission error: %v", tc.name, err) + continue + } + + switch { + case tc.expectedActiveDeadlineSeconds == nil && pod.Spec.ActiveDeadlineSeconds == nil: + // continue + case tc.expectedActiveDeadlineSeconds == nil && pod.Spec.ActiveDeadlineSeconds != nil: + t.Errorf("%s: expected nil ActiveDeadlineSeconds. 
Got: %d", tc.name, *pod.Spec.ActiveDeadlineSeconds) + case tc.expectedActiveDeadlineSeconds != nil && pod.Spec.ActiveDeadlineSeconds == nil: + t.Errorf("%s: unexpected nil ActiveDeadlineSeconds.", tc.name) + case *pod.Spec.ActiveDeadlineSeconds != *tc.expectedActiveDeadlineSeconds: + t.Errorf("%s: unexpected active deadline seconds: %d", tc.name, *pod.Spec.ActiveDeadlineSeconds) + } + } +} + +func TestReadConfig(t *testing.T) { + configStr := `apiVersion: autoscaling.openshift.io/v1 +kind: RunOnceDurationConfig +activeDeadlineSecondsOverride: 3600 +` + buf := bytes.NewBufferString(configStr) + config, err := readConfig(buf) + if err != nil { + t.Fatalf("unexpected error reading config: %v", err) + } + if config.ActiveDeadlineSecondsOverride == nil { + t.Fatalf("nil value for ActiveDeadlineSecondsLimit") + } + if *config.ActiveDeadlineSecondsOverride != 3600 { + t.Errorf("unexpected value for ActiveDeadlineSecondsLimit: %d", config.ActiveDeadlineSecondsOverride) + } +} + +func TestInt64MinP(t *testing.T) { + ten := int64(10) + twenty := int64(20) + tests := []struct { + a, b, expected *int64 + }{ + { + a: &ten, + b: nil, + expected: &ten, + }, + { + a: nil, + b: &ten, + expected: &ten, + }, + { + a: &ten, + b: &twenty, + expected: &ten, + }, + { + a: nil, + b: nil, + expected: nil, + }, + } + + for _, test := range tests { + actual := int64MinP(test.a, test.b) + switch { + case actual == nil && test.expected != nil, + test.expected == nil && actual != nil: + t.Errorf("unexpected %v for %#v", actual, test) + continue + case actual == nil: + continue + case *actual != *test.expected: + t.Errorf("unexpected: %v for %#v", actual, test) + } + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go new file mode 100644 index 0000000000000..15a3a3ae39143 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go @@ -0,0 +1,21 @@ +/* +Package runonceduration contains the RunOnceDuration admission control plugin. +The plugin allows overriding the ActiveDeadlineSeconds for pods that have a +RestartPolicy of RestartPolicyNever (run once). If configured to allow a project +annotation override, and an annotation exists in the pod's namespace of: + + openshift.io/active-deadline-seconds-override + +the value of the annotation will take precedence over the globally configured +value in the plugin's configuration. 
+ +# Configuration + +The plugin is configured via a RunOnceDurationConfig object: + + apiVersion: v1 + kind: RunOnceDurationConfig + enabled: true + activeDeadlineSecondsOverride: 3600 +*/ +package runonceduration diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount.go b/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount.go new file mode 100644 index 0000000000000..c35a7a1fad20a --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount.go @@ -0,0 +1,109 @@ +package apirequestcount + +import ( + "context" + "fmt" + "io" + "strings" + + apiv1 "github.com/openshift/api/apiserver/v1" + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateAPIRequestCount" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return newValidateAPIRequestCount() + }) +} + +func newValidateAPIRequestCount() (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + apiv1.GroupVersion.WithResource("apirequestcounts").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + apiv1.GroupVersion.WithKind("APIRequestCount"): apiRequestCountV1{}, + }) +} + +type apiRequestCountV1 struct { +} + +func toAPIRequestCountV1(uncastObj runtime.Object) (*apiv1.APIRequestCount, field.ErrorList) { + obj, ok := uncastObj.(*apiv1.APIRequestCount) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"APIRequestCount"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"apiserver.openshift.io/v1"}), + } + } + + return obj, nil +} + +func (a apiRequestCountV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toAPIRequestCountV1(uncastObj) + if len(errs) > 0 { + return errs + } + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, requireNameGVR, field.NewPath("metadata"))...) + return errs +} + +// requireNameGVR is a name validation function that requires the name to be of the form 'resource.version.group'. +func requireNameGVR(name string, _ bool) []string { + if _, err := NameToResource(name); err != nil { + return []string{err.Error()} + } + return nil +} + +// NameToResource parses a name of the form 'resource.version.group'. 
+func NameToResource(name string) (schema.GroupVersionResource, error) { + segments := strings.SplitN(name, ".", 3) + result := schema.GroupVersionResource{Resource: segments[0]} + switch len(segments) { + case 3: + result.Group = segments[2] + fallthrough + case 2: + result.Version = segments[1] + default: + return schema.GroupVersionResource{}, fmt.Errorf("apirequestcount %s: name must be of the form 'resource.version.group'", name) + } + return result, nil +} + +func (a apiRequestCountV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIRequestCountV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIRequestCountV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + return errs +} + +func (a apiRequestCountV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIRequestCountV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIRequestCountV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount_test.go new file mode 100644 index 0000000000000..f69dd194fcc30 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount/validate_apirequestcount_test.go @@ -0,0 +1,35 @@ +package apirequestcount + +import ( + "context" + "testing" + + apiv1 "github.com/openshift/api/apiserver/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestApiRequestCountV1_ValidateCreate(t *testing.T) { + testCases := []struct { + name string + errExpected bool + }{ + {"nogood", true}, + {"resource.version", false}, + {"resource.groupnonsense", false}, + {"resource.version.group", false}, + {"resource.version.group.with.dots", false}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + errs := apiRequestCountV1{}.ValidateCreate(context.TODO(), &apiv1.APIRequestCount{ObjectMeta: metav1.ObjectMeta{Name: tc.name}}) + if tc.errExpected != (len(errs) != 0) { + s := "did not expect " + if tc.errExpected { + s = "expected " + } + t.Errorf("%serrors, but got %d errors: %v", s, len(errs), errs) + } + }) + } + +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go new file mode 100644 index 0000000000000..337cbb686a2a5 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go @@ -0,0 +1,259 @@ +package apiserver + +import ( + "context" + "fmt" + "regexp" + "strings" + + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + + configv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + 
libgocrypto "github.com/openshift/library-go/pkg/crypto" +) + +func toAPIServerV1(uncastObj runtime.Object) (*configv1.APIServer, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.APIServer) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"APIServer"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type apiserverV1 struct { + infrastructureGetter func() configv1client.InfrastructuresGetter +} + +func (a apiserverV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerSpecCreate(obj.Spec)...) + errs = append(errs, a.validateSNINames(obj)...) + + return errs +} + +func (a apiserverV1) validateSNINames(obj *configv1.APIServer) field.ErrorList { + errs := field.ErrorList{} + if len(obj.Spec.ServingCerts.NamedCertificates) == 0 { + return errs + } + + infrastructure, err := a.infrastructureGetter().Infrastructures().Get(context.TODO(), "cluster", metav1.GetOptions{}) + if err != nil { + errs = append(errs, field.InternalError(field.NewPath("metadata"), err)) + } + for i, currSNI := range obj.Spec.ServingCerts.NamedCertificates { + // if names are specified, confirm they do not match + // if names are not specified, the cert can still match, but only the operator resolves the secrets down. We gain a lot of benefit by being sure + // we don't allow an explicit override of these values + for j, currName := range currSNI.Names { + path := field.NewPath("spec").Child("servingCerts").Index(i).Child("names").Index(j) + if currName == infrastructure.Status.APIServerInternalURL { + errs = append(errs, field.Invalid(path, currName, fmt.Sprintf("may not match internal loadbalancer: %q", infrastructure.Status.APIServerInternalURL))) + continue + } + if strings.HasSuffix(currName, ".*") { + withoutSuffix := currName[0 : len(currName)-2] + if strings.HasPrefix(infrastructure.Status.APIServerInternalURL, withoutSuffix) { + errs = append(errs, field.Invalid(path, currName, fmt.Sprintf("may not match internal loadbalancer: %q", infrastructure.Status.APIServerInternalURL))) + } + } + } + } + + return errs +} + +func (a apiserverV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIServerV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerSpecUpdate(obj.Spec, oldObj.Spec)...) + errs = append(errs, a.validateSNINames(obj)...) + + return errs +} + +func (apiserverV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIServerV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. 
+ errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerStatus(obj.Status)...) + + return errs +} + +func validateAPIServerSpecCreate(spec configv1.APIServerSpec) field.ErrorList { + errs := field.ErrorList{} + specPath := field.NewPath("spec") + + errs = append(errs, validateAdditionalCORSAllowedOrigins(specPath.Child("additionalCORSAllowedOrigins"), spec.AdditionalCORSAllowedOrigins)...) + errs = append(errs, validateTLSSecurityProfile(specPath.Child("tlsSecurityProfile"), spec.TLSSecurityProfile)...) + + return errs +} + +func validateAPIServerSpecUpdate(newSpec, oldSpec configv1.APIServerSpec) field.ErrorList { + errs := field.ErrorList{} + specPath := field.NewPath("spec") + + errs = append(errs, validateAdditionalCORSAllowedOrigins(specPath.Child("additionalCORSAllowedOrigins"), newSpec.AdditionalCORSAllowedOrigins)...) + errs = append(errs, validateTLSSecurityProfile(specPath.Child("tlsSecurityProfile"), newSpec.TLSSecurityProfile)...) + + return errs +} + +func validateAPIServerStatus(status configv1.APIServerStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateAdditionalCORSAllowedOrigins(fieldPath *field.Path, cors []string) field.ErrorList { + errs := field.ErrorList{} + + for i, re := range cors { + if _, err := regexp.Compile(re); err != nil { + errs = append(errs, field.Invalid(fieldPath.Index(i), re, fmt.Sprintf("not a valid regular expression: %v", err))) + } + } + + return errs +} + +func validateTLSSecurityProfile(fieldPath *field.Path, profile *configv1.TLSSecurityProfile) field.ErrorList { + errs := field.ErrorList{} + + if profile == nil { + return errs + } + + errs = append(errs, validateTLSSecurityProfileType(fieldPath, profile)...) + + if profile.Type == configv1.TLSProfileCustomType && profile.Custom != nil { + errs = append(errs, validateCipherSuites(fieldPath.Child("custom", "ciphers"), profile.Custom.Ciphers, profile.Custom.MinTLSVersion)...) + errs = append(errs, validateMinTLSVersion(fieldPath.Child("custom", "minTLSVersion"), profile.Custom.MinTLSVersion)...) 
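+ // Taken together, an accepted custom profile keeps at least one HTTP/2-required
+ // cipher (for example ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)
+ // and leaves minTLSVersion unset or below TLS 1.3, e.g.
+ //   ciphers: ["ECDHE-RSA-AES128-GCM-SHA256"]
+ //   minTLSVersion: VersionTLS12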
+ } + + return errs +} + +func validateTLSSecurityProfileType(fieldPath *field.Path, profile *configv1.TLSSecurityProfile) field.ErrorList { + const typeProfileMismatchFmt = "type set to %s, but the corresponding field is unset" + typePath := fieldPath.Child("type") + + errs := field.ErrorList{} + + availableTypes := []string{ + string(configv1.TLSProfileOldType), + string(configv1.TLSProfileIntermediateType), + string(configv1.TLSProfileCustomType), + } + + switch profile.Type { + case "": + if profile.Old != nil || profile.Intermediate != nil || profile.Modern != nil || profile.Custom != nil { + errs = append(errs, field.Required(typePath, "one of the profiles is set but 'type' field is empty")) + } + case configv1.TLSProfileOldType: + if profile.Old == nil { + errs = append(errs, field.Required(fieldPath.Child("old"), fmt.Sprintf(typeProfileMismatchFmt, profile.Type))) + } + case configv1.TLSProfileIntermediateType: + if profile.Intermediate == nil { + errs = append(errs, field.Required(fieldPath.Child("intermediate"), fmt.Sprintf(typeProfileMismatchFmt, profile.Type))) + } + case configv1.TLSProfileModernType: + errs = append(errs, field.NotSupported(fieldPath.Child("type"), profile.Type, availableTypes)) + case configv1.TLSProfileCustomType: + if profile.Custom == nil { + errs = append(errs, field.Required(fieldPath.Child("custom"), fmt.Sprintf(typeProfileMismatchFmt, profile.Type))) + } + default: + errs = append(errs, field.Invalid(typePath, profile.Type, fmt.Sprintf("unknown type, valid values are: %v", availableTypes))) + } + + return errs +} + +func validateCipherSuites(fieldPath *field.Path, suites []string, version configv1.TLSProtocolVersion) field.ErrorList { + errs := field.ErrorList{} + + if ianaSuites := libgocrypto.OpenSSLToIANACipherSuites(suites); len(ianaSuites) == 0 { + errs = append(errs, field.Invalid(fieldPath, suites, "no supported cipher suite found")) + } + + // Return an error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or + // ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 to prevent the http2 Server + // configuration to return an error when http2 required cipher suites aren't + // provided. + // See: go/x/net/http2.ConfigureServer for futher information. + if version < configv1.VersionTLS13 && !haveRequiredHTTP2CipherSuites(suites) { + errs = append(errs, field.Invalid(fieldPath, suites, "http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)")) + } + + return errs +} + +func haveRequiredHTTP2CipherSuites(suites []string) bool { + for _, s := range suites { + switch s { + case "ECDHE-RSA-AES128-GCM-SHA256", + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. 
+ "ECDHE-ECDSA-AES128-GCM-SHA256": + return true + } + } + return false +} + +func validateMinTLSVersion(fieldPath *field.Path, version configv1.TLSProtocolVersion) field.ErrorList { + errs := field.ErrorList{} + + if version == configv1.VersionTLS13 { + return append(errs, field.NotSupported(fieldPath, version, []string{string(configv1.VersionTLS10), string(configv1.VersionTLS11), string(configv1.VersionTLS12)})) + } + + if _, err := libgocrypto.TLSVersion(string(version)); err != nil { + errs = append(errs, field.Invalid(fieldPath, version, err.Error())) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go new file mode 100644 index 0000000000000..54c072363c823 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go @@ -0,0 +1,286 @@ +package apiserver + +import ( + "testing" + + configv1 "github.com/openshift/api/config/v1" + configclientfake "github.com/openshift/client-go/config/clientset/versioned/fake" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func TestValidateSNINames(t *testing.T) { + expectNoErrors := func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) > 0 { + t.Fatal(errs) + } + } + + tests := []struct { + name string + + internalName string + apiserver *configv1.APIServer + + validateErrors func(t *testing.T, errs field.ErrorList) + }{ + { + name: "no sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{}, + validateErrors: expectNoErrors, + }, + { + name: "allowed sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + { + Names: []string{"external.host.com", "somwhere.else.*"}, + }, + }, + }, + }, + }, + validateErrors: expectNoErrors, + }, + { + name: "directly invalid sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + {Names: []string{"external.host.com", "somwhere.else.*"}}, + {Names: []string{"foo.bar", "internal.host.com"}}, + }, + }, + }, + }, + validateErrors: func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) != 1 { + t.Fatal(errs) + } + if errs[0].Error() != `spec.servingCerts[1].names[1]: Invalid value: "internal.host.com": may not match internal loadbalancer: "internal.host.com"` { + t.Error(errs[0]) + } + }, + }, + { + name: "wildcard invalid sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + {Names: []string{"internal.*"}}, + }, + }, + }, + }, + validateErrors: func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) != 1 { + t.Fatal(errs) + } + if errs[0].Error() != `spec.servingCerts[0].names[0]: Invalid value: "internal.*": may not match internal loadbalancer: "internal.host.com"` { + t.Error(errs[0]) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeclient := 
configclientfake.NewSimpleClientset(&configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Status: configv1.InfrastructureStatus{ + APIServerInternalURL: test.internalName, + }, + }) + + instance := apiserverV1{ + infrastructureGetter: func() configv1client.InfrastructuresGetter { + return fakeclient.ConfigV1() + }, + } + test.validateErrors(t, instance.validateSNINames(test.apiserver)) + }) + + } +} + +func Test_validateTLSSecurityProfile(t *testing.T) { + rootFieldPath := field.NewPath("testSpec") + + tests := []struct { + name string + profile *configv1.TLSSecurityProfile + want field.ErrorList + }{ + { + name: "nil profile", + profile: nil, + want: field.ErrorList{}, + }, + { + name: "empty profile", + profile: &configv1.TLSSecurityProfile{}, + want: field.ErrorList{}, + }, + { + name: "type does not match set field", + profile: &configv1.TLSSecurityProfile{ + Type: configv1.TLSProfileIntermediateType, + Modern: &configv1.ModernTLSProfile{}, + }, + want: field.ErrorList{ + field.Required(rootFieldPath.Child("intermediate"), "type set to Intermediate, but the corresponding field is unset"), + }, + }, + { + name: "modern type - currently unsupported", + profile: &configv1.TLSSecurityProfile{ + Type: configv1.TLSProfileModernType, + Modern: &configv1.ModernTLSProfile{}, + }, + want: field.ErrorList{ + field.NotSupported(rootFieldPath.Child("type"), configv1.TLSProfileModernType, + []string{ + string(configv1.TLSProfileOldType), + string(configv1.TLSProfileIntermediateType), + string(configv1.TLSProfileCustomType), + }), + }, + }, + { + name: "unknown type", + profile: &configv1.TLSSecurityProfile{ + Type: "something", + }, + want: field.ErrorList{ + field.Invalid(rootFieldPath.Child("type"), "something", "unknown type, valid values are: [Old Intermediate Custom]"), + }, + }, + { + name: "unknown cipher", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{ + Ciphers: []string{ + "UNKNOWN_CIPHER", + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string{"UNKNOWN_CIPHER"}, "no supported cipher suite found"), + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string{"UNKNOWN_CIPHER"}, "http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)"), + }, + }, + { + name: "unknown cipher but a normal cipher", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{ + Ciphers: []string{ + "UNKNOWN_CIPHER", "ECDHE-RSA-AES128-GCM-SHA256", + }, + }, + }, + }, + want: field.ErrorList{}, + }, + { + name: "no ciphers in custom profile", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{}, + }, + }, + want: field.ErrorList{ + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string(nil), "no supported cipher suite found"), + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string(nil), "http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)"), + }, + }, + { + name: "min tls 1.3 - currently unsupported", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: 
configv1.TLSProfileSpec{ + Ciphers: []string{"ECDHE-ECDSA-CHACHA20-POLY1305"}, + MinTLSVersion: configv1.VersionTLS13, + }, + }, + }, + want: field.ErrorList{ + field.NotSupported(rootFieldPath.Child("custom", "minTLSVersion"), configv1.VersionTLS13, []string{string(configv1.VersionTLS10), string(configv1.VersionTLS11), string(configv1.VersionTLS12)}), + }, + }, + { + name: "custom profile missing required http2 ciphers", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{ + Ciphers: []string{ + "ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + }, + MinTLSVersion: configv1.VersionTLS12, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(rootFieldPath.Child("custom", "ciphers"), []string{"ECDSA-AES256-GCM-SHA384", "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305"}, "http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of ECDHE-RSA-AES128-GCM-SHA256 or ECDHE-ECDSA-AES128-GCM-SHA256)"), + }, + }, + { + name: "custom profile with one required http2 ciphers", + profile: &configv1.TLSSecurityProfile{ + Type: "Custom", + Custom: &configv1.CustomTLSProfile{ + TLSProfileSpec: configv1.TLSProfileSpec{ + Ciphers: []string{ + "ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "ECDHE-RSA-AES128-GCM-SHA256", + }, + MinTLSVersion: configv1.VersionTLS12, + }, + }, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := validateTLSSecurityProfile(rootFieldPath, tt.profile) + + if len(tt.want) != len(got) { + t.Errorf("expected %d errors, got %d: %v", len(tt.want), len(got), got) + return + } + + for i, err := range got { + if err.Error() != tt.want[i].Error() { + t.Errorf("expected %v, got %v", tt.want, got) + break + } + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go new file mode 100644 index 0000000000000..149361cd1e096 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go @@ -0,0 +1,72 @@ +package apiserver + +import ( + "fmt" + "io" + + configv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/rest" +) + +const PluginName = "config.openshift.io/ValidateAPIServer" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return NewValidateAPIServer() + }) +} + +type validateCustomResourceWithClient struct { + admission.ValidationInterface + + infrastructureGetter configv1client.InfrastructuresGetter +} + +func NewValidateAPIServer() (admission.Interface, error) { + ret := &validateCustomResourceWithClient{} + + delegate, err := customresourcevalidation.NewValidator( + 
map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("apiservers").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("APIServer"): apiserverV1{infrastructureGetter: ret.getInfrastructureGetter}, + }) + if err != nil { + return nil, err + } + ret.ValidationInterface = delegate + + return ret, nil +} + +var _ admissionrestconfig.WantsRESTClientConfig = &validateCustomResourceWithClient{} + +func (a *validateCustomResourceWithClient) getInfrastructureGetter() configv1client.InfrastructuresGetter { + return a.infrastructureGetter +} + +func (a *validateCustomResourceWithClient) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + a.infrastructureGetter, err = configv1client.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } +} + +func (a *validateCustomResourceWithClient) ValidateInitialization() error { + if a.infrastructureGetter == nil { + return fmt.Errorf(PluginName + " needs an infrastructureGetter") + } + + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go b/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go new file mode 100644 index 0000000000000..0f1f379d576cf --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go @@ -0,0 +1,59 @@ +package customresourcevalidation + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + + apiv1 "github.com/openshift/api/apiserver/v1" + authorizationv1 "github.com/openshift/api/authorization/v1" + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + quotav1 "github.com/openshift/api/quota/v1" + routev1 "github.com/openshift/api/route/v1" + securityv1 "github.com/openshift/api/security/v1" +) + +// unstructuredUnpackingAttributes tries to convert to a real object in the config scheme +type unstructuredUnpackingAttributes struct { + admission.Attributes +} + +func (a *unstructuredUnpackingAttributes) GetObject() runtime.Object { + return toBestObjectPossible(a.Attributes.GetObject()) +} + +func (a *unstructuredUnpackingAttributes) GetOldObject() runtime.Object { + return toBestObjectPossible(a.Attributes.GetOldObject()) +} + +// toBestObjectPossible tries to convert to a real object in the supported scheme +func toBestObjectPossible(orig runtime.Object) runtime.Object { + unstructuredOrig, ok := orig.(runtime.Unstructured) + if !ok { + return orig + } + + targetObj, err := supportedObjectsScheme.New(unstructuredOrig.GetObjectKind().GroupVersionKind()) + if err != nil { + utilruntime.HandleError(err) + return unstructuredOrig + } + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredOrig.UnstructuredContent(), targetObj); err != nil { + utilruntime.HandleError(err) + return unstructuredOrig + } + return targetObj +} + +var supportedObjectsScheme = runtime.NewScheme() + +func init() { + utilruntime.Must(configv1.Install(supportedObjectsScheme)) + utilruntime.Must(operatorv1.Install(supportedObjectsScheme)) + utilruntime.Must(quotav1.Install(supportedObjectsScheme)) + utilruntime.Must(securityv1.Install(supportedObjectsScheme)) + utilruntime.Must(authorizationv1.Install(supportedObjectsScheme)) + utilruntime.Must(apiv1.Install(supportedObjectsScheme)) + utilruntime.Must(routev1.Install(supportedObjectsScheme)) +} diff --git 
a/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go new file mode 100644 index 0000000000000..26506e47019cf --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go @@ -0,0 +1,134 @@ +package authentication + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateAuthentication" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("authentications").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Authentication"): authenticationV1{}, + }) + }) +} + +func toAuthenticationV1(uncastObj runtime.Object) (*configv1.Authentication, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*configv1.Authentication) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Authentication"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"}), + } + } + + return obj, nil +} + +type authenticationV1 struct{} + +func (authenticationV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, crvalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationSpecCreate(obj.Spec)...) + + return errs +} + +func (authenticationV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAuthenticationV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (authenticationV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAuthenticationV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationStatus(obj.Status)...) 
+ + return errs +} + +func validateAuthenticationSpecCreate(spec configv1.AuthenticationSpec) field.ErrorList { + return validateAuthenticationSpec(spec) +} + +func validateAuthenticationSpecUpdate(newspec, oldspec configv1.AuthenticationSpec) field.ErrorList { + return validateAuthenticationSpec(newspec) +} + +func validateAuthenticationSpec(spec configv1.AuthenticationSpec) field.ErrorList { + errs := field.ErrorList{} + specField := field.NewPath("spec") + + if spec.WebhookTokenAuthenticator != nil { + switch spec.Type { + case configv1.AuthenticationTypeNone, configv1.AuthenticationTypeIntegratedOAuth, "": + // validate the secret name in WebhookTokenAuthenticator + errs = append( + errs, + crvalidation.ValidateSecretReference( + specField.Child("webhookTokenAuthenticator").Child("kubeConfig"), + spec.WebhookTokenAuthenticator.KubeConfig, + false, + )..., + ) + default: + errs = append(errs, field.Invalid(specField.Child("webhookTokenAuthenticator"), + spec.WebhookTokenAuthenticator, fmt.Sprintf("this field cannot be set with the %q .spec.type", spec.Type), + )) + } + + } + + errs = append(errs, crvalidation.ValidateConfigMapReference(specField.Child("oauthMetadata"), spec.OAuthMetadata, false)...) + + return errs +} + +func validateAuthenticationStatus(status configv1.AuthenticationStatus) field.ErrorList { + return crvalidation.ValidateConfigMapReference(field.NewPath("status", "integratedOAuthMetadata"), status.IntegratedOAuthMetadata, false) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go new file mode 100644 index 0000000000000..d93f3f67f6fe9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go @@ -0,0 +1,179 @@ +package authentication + +import ( + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func TestFailValidateAuthenticationSpec(t *testing.T) { + errorCases := map[string]struct { + spec configv1.AuthenticationSpec + errorType field.ErrorType + errorField string + }{ + "invalid metadata ref": { + spec: configv1.AuthenticationSpec{ + Type: "", + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "../shadow", + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.oauthMetadata.name", + }, + "invalid webhook ref": { + spec: configv1.AuthenticationSpec{ + WebhookTokenAuthenticator: &configv1.WebhookTokenAuthenticator{ + KubeConfig: configv1.SecretNameReference{Name: "this+that"}, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.webhookTokenAuthenticator.kubeConfig.name", + }, + "valid webhook ref": { + spec: configv1.AuthenticationSpec{ + WebhookTokenAuthenticator: &configv1.WebhookTokenAuthenticator{ + KubeConfig: configv1.SecretNameReference{Name: "this"}, + }, + }, + }, + "invalid webhook ref for a Type": { + spec: configv1.AuthenticationSpec{ + Type: "OIDC", + WebhookTokenAuthenticator: &configv1.WebhookTokenAuthenticator{ + KubeConfig: configv1.SecretNameReference{Name: "this"}, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.webhookTokenAuthenticator", + }, + } + + for tcName, tc := range errorCases { + errs := validateAuthenticationSpec(tc.spec) + if (len(errs) > 0) != (len(tc.errorType) != 0) { + t.Errorf("'%s': expected failure: %t, got: %t", tcName, len(tc.errorType) != 0, len(errs) > 0) + } + + 
for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("'%s': expected errors of type '%s', got %v:", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("'%s': expected errors in field '%s', got %v:", tcName, tc.errorField, e) + } + } + } +} + +func TestSucceedValidateAuthenticationSpec(t *testing.T) { + successCases := map[string]configv1.AuthenticationSpec{ + "integrated oauth authn type": { + Type: "IntegratedOAuth", + }, + "_none_ authn type": { + Type: "None", + }, + "empty authn type": { + Type: "", + }, + "integrated oauth + oauth metadata": { + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "configmapwithmetadata", + }, + }, + "webhook set": { + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "wheniwaslittleiwantedtobecomeawebhook"}}, + }, + }, + "some webhooks": { + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "whatacoolnameforasecret"}}, + {KubeConfig: configv1.SecretNameReference{Name: "whatacoolnameforasecret2"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisalsoisacoolname"}}, + {KubeConfig: configv1.SecretNameReference{Name: "letsnotoverdoit"}}, + }, + }, + "all fields set": { + Type: "IntegratedOAuth", + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "suchname", + }, + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook2"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook33"}}, + }, + }, + } + + for tcName, s := range successCases { + errs := validateAuthenticationSpec(s) + if len(errs) != 0 { + t.Errorf("'%s': expected success, but failed: %v", tcName, errs.ToAggregate().Error()) + } + } +} + +func TestFailValidateAuthenticationStatus(t *testing.T) { + errorCases := map[string]struct { + status configv1.AuthenticationStatus + errorType field.ErrorType + errorField string + }{ + "wrong reference name": { + status: configv1.AuthenticationStatus{ + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "something_wrong", + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "status.integratedOAuthMetadata.name", + }, + } + + for tcName, tc := range errorCases { + errs := validateAuthenticationStatus(tc.status) + if len(errs) == 0 { + t.Errorf("'%s': should have failed but did not", tcName) + } + + for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("'%s': expected errors of type '%s', got %v:", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("'%s': expected errors in field '%s', got %v:", tcName, tc.errorField, e) + } + } + } +} + +func TestSucceedValidateAuthenticationStatus(t *testing.T) { + successCases := map[string]configv1.AuthenticationStatus{ + "basic case": { + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "hey-there", + }, + }, + "empty reference": { + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "", + }, + }, + "empty status": {}, + } + + for tcName, s := range successCases { + errs := validateAuthenticationStatus(s) + if len(errs) != 0 { + t.Errorf("'%s': expected success, but failed: %v", tcName, errs.ToAggregate().Error()) + } + } + +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go 
b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go new file mode 100644 index 0000000000000..8cdfd33de381f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go @@ -0,0 +1,84 @@ +package clusterresourcequota + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + quotav1 "github.com/openshift/api/quota/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + quotavalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation" +) + +const PluginName = "quota.openshift.io/ValidateClusterResourceQuota" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: quotav1.GroupName, Resource: "clusterresourcequotas"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + quotav1.GroupVersion.WithKind("ClusterResourceQuota"): clusterResourceQuotaV1{}, + }) + }) +} + +func toClusterResourceQuota(uncastObj runtime.Object) (*quotav1.ClusterResourceQuota, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*quotav1.ClusterResourceQuota) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"ClusterResourceQuota"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{quotav1.GroupVersion.String()})) + } + + return obj, nil +} + +type clusterResourceQuotaV1 struct { +} + +func (clusterResourceQuotaV1) ValidateCreate(_ context.Context, obj runtime.Object) field.ErrorList { + clusterResourceQuotaObj, errs := toClusterResourceQuota(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&clusterResourceQuotaObj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, quotavalidation.ValidateClusterResourceQuota(clusterResourceQuotaObj)...) + + return errs +} + +func (clusterResourceQuotaV1) ValidateUpdate(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + clusterResourceQuotaObj, errs := toClusterResourceQuota(obj) + if len(errs) > 0 { + return errs + } + clusterResourceQuotaOldObj, errs := toClusterResourceQuota(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&clusterResourceQuotaObj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, quotavalidation.ValidateClusterResourceQuotaUpdate(clusterResourceQuotaObj, clusterResourceQuotaOldObj)...) 
+ + return errs +} + +func (c clusterResourceQuotaV1) ValidateStatusUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return c.ValidateUpdate(ctx, obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go new file mode 100644 index 0000000000000..7bc1767497bb5 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go @@ -0,0 +1,68 @@ +package validation + +import ( + "sort" + + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/validation" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func ValidateClusterResourceQuota(clusterquota *quotav1.ClusterResourceQuota) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&clusterquota.ObjectMeta, false, validation.ValidateResourceQuotaName, field.NewPath("metadata")) + + hasSelectionCriteria := (clusterquota.Spec.Selector.LabelSelector != nil && len(clusterquota.Spec.Selector.LabelSelector.MatchLabels)+len(clusterquota.Spec.Selector.LabelSelector.MatchExpressions) > 0) || + (len(clusterquota.Spec.Selector.AnnotationSelector) > 0) + + if !hasSelectionCriteria { + allErrs = append(allErrs, field.Required(field.NewPath("spec", "selector"), "must restrict the selected projects")) + } + if clusterquota.Spec.Selector.LabelSelector != nil { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(clusterquota.Spec.Selector.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{}, field.NewPath("spec", "selector", "labels"))...) + if len(clusterquota.Spec.Selector.LabelSelector.MatchLabels)+len(clusterquota.Spec.Selector.LabelSelector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "selector", "labels"), clusterquota.Spec.Selector.LabelSelector, "must restrict the selected projects")) + } + } + if clusterquota.Spec.Selector.AnnotationSelector != nil { + allErrs = append(allErrs, validation.ValidateAnnotations(clusterquota.Spec.Selector.AnnotationSelector, field.NewPath("spec", "selector", "annotations"))...) + } + + internalQuota := &core.ResourceQuotaSpec{} + if err := v1.Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&clusterquota.Spec.Quota, internalQuota, nil); err != nil { + panic(err) + } + internalStatus := &core.ResourceQuotaStatus{} + if err := v1.Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(&clusterquota.Status.Total, internalStatus, nil); err != nil { + panic(err) + } + + allErrs = append(allErrs, validation.ValidateResourceQuotaSpec(internalQuota, field.NewPath("spec", "quota"))...) + allErrs = append(allErrs, validation.ValidateResourceQuotaStatus(internalStatus, field.NewPath("status", "overall"))...) 
+ + orderedNamespaces := clusterquota.Status.Namespaces.DeepCopy() + sort.Slice(orderedNamespaces, func(i, j int) bool { + return orderedNamespaces[i].Namespace < orderedNamespaces[j].Namespace + }) + + for _, namespace := range orderedNamespaces { + fldPath := field.NewPath("status", "namespaces").Key(namespace.Namespace) + for k, v := range namespace.Status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, validation.ValidateResourceQuotaResourceName(core.ResourceName(k), resPath)...) + allErrs = append(allErrs, validation.ValidateResourceQuantityValue(core.ResourceName(k), v, resPath)...) + } + } + + return allErrs +} + +func ValidateClusterResourceQuotaUpdate(clusterquota, oldClusterResourceQuota *quotav1.ClusterResourceQuota) field.ErrorList { + allErrs := validation.ValidateObjectMetaUpdate(&clusterquota.ObjectMeta, &oldClusterResourceQuota.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateClusterResourceQuota(clusterquota)...) + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go new file mode 100644 index 0000000000000..c1dbf76aecf46 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go @@ -0,0 +1,173 @@ +package validation + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" + corekubev1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/validation" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func spec(scopes ...corev1.ResourceQuotaScope) corev1.ResourceQuotaSpec { + return corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100"), + corev1.ResourceMemory: resource.MustParse("10000"), + corev1.ResourceRequestsCPU: resource.MustParse("100"), + corev1.ResourceRequestsMemory: resource.MustParse("10000"), + corev1.ResourceLimitsCPU: resource.MustParse("100"), + corev1.ResourceLimitsMemory: resource.MustParse("10000"), + corev1.ResourcePods: resource.MustParse("10"), + corev1.ResourceServices: resource.MustParse("0"), + corev1.ResourceReplicationControllers: resource.MustParse("10"), + corev1.ResourceQuotas: resource.MustParse("10"), + corev1.ResourceConfigMaps: resource.MustParse("10"), + corev1.ResourceSecrets: resource.MustParse("10"), + }, + Scopes: scopes, + } +} + +func scopeableSpec(scopes ...corev1.ResourceQuotaScope) corev1.ResourceQuotaSpec { + return corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100"), + corev1.ResourceMemory: resource.MustParse("10000"), + corev1.ResourceRequestsCPU: resource.MustParse("100"), + corev1.ResourceRequestsMemory: resource.MustParse("10000"), + corev1.ResourceLimitsCPU: resource.MustParse("100"), + corev1.ResourceLimitsMemory: resource.MustParse("10000"), + }, + Scopes: scopes, + } +} + +func TestValidationClusterQuota(t *testing.T) { + // storage is not yet supported as a quota tracked resource + invalidQuotaResourceSpec := corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10"), + }, + } + validLabels := map[string]string{"a": "b"} + + errs := ValidateClusterResourceQuota( 
+ "av1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(), + }, + }, + ) + if len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := map[string]struct { + A quotav1.ClusterResourceQuota + T field.ErrorType + F string + }{ + "non-zero-length namespace": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Namespace: "bad", Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(), + }, + }, + T: field.ErrorTypeForbidden, + F: "metadata.namespace", + }, + "missing label selector": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Quota: spec(), + }, + }, + T: field.ErrorTypeRequired, + F: "spec.selector", + }, + "ok scope": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Quota: scopeableSpec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + T: field.ErrorTypeRequired, + F: "spec.selector", + }, + "bad scope": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + T: field.ErrorTypeInvalid, + F: "spec.quota.scopes", + }, + "bad quota spec": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: invalidQuotaResourceSpec, + }, + }, + T: field.ErrorTypeInvalid, + F: "spec.quota.hard[storage]", + }, + } + for k, v := range errorCases { + errs := ValidateClusterResourceQuota(&v.A) + if len(errs) == 0 { + t.Errorf("expected failure %s for %v", k, v.A) + continue + } + for i := range errs { + if errs[i].Type != v.T { + t.Errorf("%s: expected errors to have type %s: %v", k, v.T, errs[i]) + } + if errs[i].Field != v.F { + t.Errorf("%s: expected errors to have field %s: %v", k, v.F, errs[i]) + } + } + } +} + +func TestValidationQuota(t *testing.T) { + tests := map[string]struct { + A corev1.ResourceQuota + T field.ErrorType + F string + }{ + "scope": { + A: corev1.ResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "good"}, + Spec: scopeableSpec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + } + for k, v := range tests { + internal := core.ResourceQuota{} + if err := corekubev1.Convert_v1_ResourceQuota_To_core_ResourceQuota(&v.A, &internal, nil); err != nil { + panic(err) + } + errs := validation.ValidateResourceQuota(&internal) + if len(errs) != 0 { + t.Errorf("%s: %v", k, errs) + continue + } + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go new file mode 100644 index 0000000000000..f637e95cece3a --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go @@ -0,0 +1,54 @@ +package 
config + +import ( + "context" + "fmt" + "io" + + "k8s.io/apiserver/pkg/admission" +) + +const PluginName = "config.openshift.io/DenyDeleteClusterConfiguration" + +// Register registers an admission plugin factory whose plugin prevents the deletion of cluster configuration resources. +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return newAdmissionPlugin(), nil + }) +} + +var _ admission.ValidationInterface = &admissionPlugin{} + +type admissionPlugin struct { + *admission.Handler +} + +func newAdmissionPlugin() *admissionPlugin { + return &admissionPlugin{Handler: admission.NewHandler(admission.Delete)} +} + +// Validate returns an error if there is an attempt to delete a cluster configuration resource. +func (p *admissionPlugin) Validate(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + if len(attributes.GetSubresource()) > 0 { + return nil + } + if attributes.GetResource().Group != "config.openshift.io" { + return nil + } + // clusteroperators can be deleted so that we can force status refreshes and change over time. + // clusterversions not named `version` can be deleted (none are expected to exist). + // other config.openshift.io resources not named `cluster` can be deleted (none are expected to exist). + switch attributes.GetResource().Resource { + case "clusteroperators": + return nil + case "clusterversions": + if attributes.GetName() != "version" { + return nil + } + default: + if attributes.GetName() != "cluster" { + return nil + } + } + return admission.NewForbidden(attributes, fmt.Errorf("deleting required %s.%s resource, named %s, is not allowed", attributes.GetResource().Resource, attributes.GetResource().Group, attributes.GetName())) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go new file mode 100644 index 0000000000000..70d289f5f26df --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go @@ -0,0 +1,73 @@ +package config + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +func TestAdmissionPlugin_Validate(t *testing.T) { + testCases := []struct { + tcName string + group string + resource string + name string + denyDelete bool + }{ + { + tcName: "NotWhiteListedResourceNamedCluster", + group: "config.openshift.io", + resource: "notWhitelisted", + name: "cluster", + denyDelete: true, + }, + { + tcName: "NotWhiteListedResourceNotNamedCluster", + group: "config.openshift.io", + resource: "notWhitelisted", + name: "notCluster", + denyDelete: false, + }, + { + tcName: "ClusterVersionVersion", + group: "config.openshift.io", + resource: "clusterversions", + name: "version", + denyDelete: true, + }, + { + tcName: "ClusterVersionNotVersion", + group: "config.openshift.io", + resource: "clusterversions", + name: "instance", + denyDelete: false, + }, + { + tcName: "ClusterOperator", + group: "config.openshift.io", + resource: "clusteroperator", + name: "instance", + denyDelete: false, + }, + { + tcName: "OtherGroup", + group: "not.config.openshift.io", + resource: "notWhitelisted", + name: "cluster", + denyDelete: false, + }, + } + for _, tc := range testCases { + t.Run(tc.tcName, func(t *testing.T) { + err := 
newAdmissionPlugin().Validate(context.TODO(), admission.NewAttributesRecord( + nil, nil, schema.GroupVersionKind{}, "", + tc.name, schema.GroupVersionResource{Group: tc.group, Resource: tc.resource}, + "", admission.Delete, nil, false, nil), nil) + if tc.denyDelete != (err != nil) { + t.Error(tc.denyDelete, err) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go b/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go new file mode 100644 index 0000000000000..8f60bbe73c128 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go @@ -0,0 +1,119 @@ +package console + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateConsole" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("consoles").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Console"): consoleV1{}, + }) + }) +} + +func toConsoleV1(uncastObj runtime.Object) (*configv1.Console, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Console) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Console"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type consoleV1 struct{} + +func (consoleV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleSpecCreate(obj.Spec)...) + + return errs +} + +func (consoleV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConsoleV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (consoleV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConsoleV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. 
+ errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleStatus(obj.Status)...) + + return errs +} + +func validateConsoleSpecCreate(spec configv1.ConsoleSpec) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateConsoleSpecUpdate(newSpec, oldSpec configv1.ConsoleSpec) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateConsoleStatus(status configv1.ConsoleStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go new file mode 100644 index 0000000000000..76bdd704ec165 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go @@ -0,0 +1,92 @@ +package customresourcevalidationregistration + +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/apirequestcount" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/apiserver" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/authentication" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/config" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/console" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/features" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/image" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/network" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/node" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/oauth" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/operator" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/project" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/route" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/scheduler" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints" +) + +// AllCustomResourceValidators are the names of all custom resource validators that should be registered +var AllCustomResourceValidators = []string{ + apiserver.PluginName, + authentication.PluginName, + features.PluginName, + console.PluginName, + dns.PluginName, + image.PluginName, + oauth.PluginName, + project.PluginName, + config.PluginName, + operator.PluginName, + scheduler.PluginName, + clusterresourcequota.PluginName, + securitycontextconstraints.PluginName, + rolebindingrestriction.PluginName, + network.PluginName, + 
apirequestcount.PluginName, + node.PluginName, + route.DefaultingPluginName, + route.PluginName, + + // the kubecontrollermanager operator resource has to exist in order to run deployments to deploy admission webhooks. + kubecontrollermanager.PluginName, + + // this one is special because we don't work without it. + securitycontextconstraints.DefaultingPluginName, +} + +func RegisterCustomResourceValidation(plugins *admission.Plugins) { + apiserver.Register(plugins) + authentication.Register(plugins) + features.Register(plugins) + console.Register(plugins) + dns.Register(plugins) + image.Register(plugins) + oauth.Register(plugins) + project.Register(plugins) + config.Register(plugins) + operator.Register(plugins) + scheduler.Register(plugins) + kubecontrollermanager.Register(plugins) + + // This plugin validates the quota.openshift.io/v1 ClusterResourceQuota resources. + // NOTE: This is only allowed because it is required to get a running control plane operator. + clusterresourcequota.Register(plugins) + // This plugin validates the security.openshift.io/v1 SecurityContextConstraints resources. + securitycontextconstraints.Register(plugins) + // This plugin validates the authorization.openshift.io/v1 RoleBindingRestriction resources. + rolebindingrestriction.Register(plugins) + // This plugin validates the network.config.openshift.io object for service node port range changes + network.Register(plugins) + // This plugin validates the apiserver.openshift.io/v1 APIRequestCount resources. + apirequestcount.Register(plugins) + // This plugin validates config.openshift.io/v1/node objects + node.Register(plugins) + + // this one is special because we don't work without it. + securitycontextconstraints.RegisterDefaulting(plugins) + + // Requests to route.openshift.io/v1 should only go through kube-apiserver admission if + // served via CRD. Most OpenShift flavors (including vanilla) will continue to do validation + // and defaulting inside openshift-apiserver. + route.Register(plugins) + route.RegisterDefaulting(plugins) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go new file mode 100644 index 0000000000000..94f763ea2ca30 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go @@ -0,0 +1,101 @@ +package customresourcevalidation + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" +) + +// ObjectValidator validates a given resource across create, +// update and status update ops. +type ObjectValidator interface { + // TODO: add router validation logic with ctx, remove this todo once added + ValidateCreate(ctx context.Context, obj runtime.Object) field.ErrorList + ValidateUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList + ValidateStatusUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList +} + +// validateCustomResource is an implementation of admission.ValidationInterface. +// It validates create, update, and status update requests for the configured custom resources +// by dispatching each object to the ObjectValidator registered for its kind.
+type validateCustomResource struct { + *admission.Handler + + resources map[schema.GroupResource]bool + validators map[schema.GroupVersionKind]ObjectValidator +} + +func NewValidator(resources map[schema.GroupResource]bool, validators map[schema.GroupVersionKind]ObjectValidator) (admission.ValidationInterface, error) { + return &validateCustomResource{ + Handler: admission.NewHandler(admission.Create, admission.Update), + resources: resources, + validators: validators, + }, nil +} + +var _ admission.ValidationInterface = &validateCustomResource{} + +// Validate is an admission function that will validate a CRD in config.openshift.io. uncastAttributes are attributes +// that are of type unstructured. +func (a *validateCustomResource) Validate(ctx context.Context, uncastAttributes admission.Attributes, _ admission.ObjectInterfaces) error { + attributes := &unstructuredUnpackingAttributes{Attributes: uncastAttributes} + if a.shouldIgnore(attributes) { + return nil + } + validator, ok := a.validators[attributes.GetKind()] + if !ok { + return admission.NewForbidden(attributes, fmt.Errorf("unhandled kind: %v", attributes.GetKind())) + } + + switch attributes.GetOperation() { + case admission.Create: + // creating subresources isn't something we understand, but we can be pretty sure we don't need to validate it + if len(attributes.GetSubresource()) > 0 { + return nil + } + errors := validator.ValidateCreate(ctx, attributes.GetObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + case admission.Update: + switch attributes.GetSubresource() { + case "": + errors := validator.ValidateUpdate(ctx, attributes.GetObject(), attributes.GetOldObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + case "status": + errors := validator.ValidateStatusUpdate(ctx, attributes.GetObject(), attributes.GetOldObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + default: + return admission.NewForbidden(attributes, fmt.Errorf("unhandled subresource: %v", attributes.GetSubresource())) + } + + default: + return admission.NewForbidden(attributes, fmt.Errorf("unhandled operation: %v", attributes.GetOperation())) + } +} + +func (a *validateCustomResource) shouldIgnore(attributes admission.Attributes) bool { + if !a.resources[attributes.GetResource().GroupResource()] { + return true + } + // if a subresource is specified and it isn't status, skip it + if len(attributes.GetSubresource()) > 0 && attributes.GetSubresource() != "status" { + return true + } + + return false +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go new file mode 100644 index 0000000000000..6fa92c79e8604 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go @@ -0,0 +1,278 @@ +package customresourcevalidation + +import ( + "context" + "errors" + "fmt" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + + configv1 
"github.com/openshift/api/config/v1" +) + +func TestCustomResourceValidator(t *testing.T) { + + const ( + testGroup = "config.openshift.io" + testVersion = "v1" + testResource = "images" + testKind = "Image" + ) + + var testObjectType *configv1.Image + + testCases := []struct { + description string + object runtime.Object + objectBytes []byte + oldObject runtime.Object + oldObjectBytes []byte + kind schema.GroupVersionKind + namespace string + name string + resource schema.GroupVersionResource + subresource string + operation admission.Operation + userInfo user.Info + expectError bool + expectCreateFuncCalled bool + expectUpdateFuncCalled bool + expectStatusUpdateFuncCalled bool + validateFuncErr bool + expectedObjectType interface{} + }{ + { + description: "ShouldIgnoreUnknownResource", + resource: schema.GroupVersionResource{ + Group: "other_group", + Version: "other_version", + Resource: "other_resource", + }, + }, + { + description: "ShouldIgnoreUnknownSubresource", + subresource: "not_status", + }, + { + description: "ShouldIgnoreUnknownSubresource", + subresource: "not_status", + }, + { + description: "UnhandledOperationConnect", + operation: admission.Connect, + expectError: true, + }, + { + description: "UnhandledOperationDelete", + operation: admission.Delete, + expectError: true, + }, + { + description: "UnhandledKind", + operation: admission.Create, + kind: schema.GroupVersionKind{ + Group: "other_group", + Version: "other_version", + Kind: "other_resource", + }, + expectError: true, + }, + { + description: "Create", + operation: admission.Create, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectCreateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "CreateSubresourceNope", + operation: admission.Create, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + }, + { + description: "CreateError", + operation: admission.Create, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + validateFuncErr: true, + expectCreateFuncCalled: true, + expectError: true, + }, + { + description: "Update", + operation: admission.Update, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectUpdateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "UpdateError", + operation: admission.Update, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + validateFuncErr: true, + expectError: true, + }, + { + description: "UpdateStatus", + operation: admission.Update, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectStatusUpdateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "UpdateStatusError", + operation: admission.Update, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: 
[]byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectStatusUpdateFuncCalled: true, + validateFuncErr: true, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + + var createFuncCalled bool + var updateFuncCalled bool + var updateStatusFuncCalled bool + var funcArgObject runtime.Object + var funcArgOldObject runtime.Object + + handler, err := NewValidator( + map[schema.GroupResource]bool{ + {Group: testGroup, Resource: testResource}: true, + }, + map[schema.GroupVersionKind]ObjectValidator{ + {Group: testGroup, Version: testVersion, Kind: testKind}: testValidator{ + validateCreate: func(_ context.Context, obj runtime.Object) field.ErrorList { + createFuncCalled = true + if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + funcArgObject = obj + return nil + }, + validateUpdate: func(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + updateFuncCalled = true + funcArgObject = obj + funcArgOldObject = oldObj + return nil + }, + validateStatusUpdate: func(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + updateStatusFuncCalled = true + if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + funcArgObject = obj + funcArgOldObject = oldObj + return nil + }, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + validator := handler.(admission.ValidationInterface) + + if len(tc.objectBytes) > 0 { + object, kind, err := unstructured.UnstructuredJSONScheme.Decode(tc.objectBytes, nil, nil) + if err != nil { + t.Fatal(err) + } + tc.object = object.(runtime.Object) + tc.kind = *kind + } + + if len(tc.oldObjectBytes) > 0 { + object, kind, err := unstructured.UnstructuredJSONScheme.Decode(tc.oldObjectBytes, nil, nil) + if err != nil { + t.Fatal(err) + } + tc.oldObject = object.(runtime.Object) + tc.kind = *kind + } + + if tc.resource == (schema.GroupVersionResource{}) { + tc.resource = schema.GroupVersionResource{ + Group: testGroup, + Version: testVersion, + Resource: testResource, + } + } + + attributes := admission.NewAttributesRecord( + tc.object, + tc.oldObject, + tc.kind, + tc.namespace, + tc.name, + tc.resource, + tc.subresource, + tc.operation, + nil, + false, + tc.userInfo, + ) + + err = validator.Validate(context.TODO(), attributes, nil) + switch { + case tc.expectError && err == nil: + t.Error("Error expected") + case !tc.expectError && err != nil: + t.Errorf("Unexpected error: %v", err) + } + if tc.expectCreateFuncCalled != createFuncCalled { + t.Errorf("ValidateObjCreateFunc called: expected: %v, actual: %v", tc.expectCreateFuncCalled, createFuncCalled) + } + if tc.expectUpdateFuncCalled != updateFuncCalled { + t.Errorf("ValidateObjUpdateFunc called: expected: %v, actual: %v", tc.expectUpdateFuncCalled, updateFuncCalled) + } + if tc.expectStatusUpdateFuncCalled != updateStatusFuncCalled { + t.Errorf("ValidateStatusUpdateFunc called: expected: %v, actual: %v", tc.expectStatusUpdateFuncCalled, updateStatusFuncCalled) + } + if reflect.TypeOf(tc.expectedObjectType) != reflect.TypeOf(funcArgObject) { + t.Errorf("Expected %T, actual %T", tc.expectedObjectType, funcArgObject) + } + if (tc.oldObject != nil) && (reflect.TypeOf(tc.expectedObjectType) != 
reflect.TypeOf(funcArgOldObject)) { + t.Errorf("Expected %T, actual %T", tc.expectedObjectType, funcArgOldObject) + } + }) + } + +} + +type testValidator struct { + validateCreate func(_ context.Context, uncastObj runtime.Object) field.ErrorList + validateUpdate func(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList + validateStatusUpdate func(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList +} + +func (v testValidator) ValidateCreate(ctx context.Context, uncastObj runtime.Object) field.ErrorList { + return v.validateCreate(ctx, uncastObj) +} + +func (v testValidator) ValidateUpdate(ctx context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + return v.validateUpdate(ctx, uncastObj, uncastOldObj) + +} + +func (v testValidator) ValidateStatusUpdate(ctx context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + return v.validateStatusUpdate(ctx, uncastObj, uncastOldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go b/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go new file mode 100644 index 0000000000000..0ae18e8f7e684 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go @@ -0,0 +1,242 @@ +package dns + +import ( + "context" + "fmt" + "io" + "reflect" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/validation" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" + + operatorv1 "github.com/openshift/api/operator/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "operator.openshift.io/ValidateDNS" + +// Register registers the DNS validation plugin. +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + operatorv1.GroupVersion.WithResource("dnses").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + operatorv1.GroupVersion.WithKind("DNS"): dnsV1{}, + }) + }) +} + +// toDNSV1 converts a runtime object to a versioned DNS. +func toDNSV1(uncastObj runtime.Object) (*operatorv1.DNS, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*operatorv1.DNS) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"DNS"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"operator.openshift.io/v1"}), + } + } + + return obj, nil +} + +// dnsV1 is runtime object that is validated as a versioned DNS. +type dnsV1 struct{} + +// ValidateCreate validates a DNS that is being created. +func (dnsV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) 
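+ // beyond object metadata, the spec fields are checked by validateDNSSpecCreate below (node placement, upstream resolvers, and servers)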
+ errs = append(errs, validateDNSSpecCreate(obj.Spec)...) + + return errs +} + +// ValidateUpdate validates a DNS that is being updated. +func (dnsV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toDNSV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateDNSSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +// ValidateStatusUpdate validates a DNS status that is being updated. +func (dnsV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toDNSV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} + +// validateDNSSpecCreate validates the spec of a DNS that is being created. +func validateDNSSpecCreate(spec operatorv1.DNSSpec) field.ErrorList { + var errs field.ErrorList + specField := field.NewPath("spec") + errs = append(errs, validateDNSNodePlacement(spec.NodePlacement, specField.Child("nodePlacement"))...) + errs = append(errs, validateUpstreamResolversCreate(spec.UpstreamResolvers, specField.Child("upstreamResolvers"))...) + errs = append(errs, validateServersCreate(spec.Servers, specField.Child("servers"))...) + return errs +} + +// validateDNSSpecUpdate validates the spec of a DNS that is being updated. +func validateDNSSpecUpdate(newspec, oldspec operatorv1.DNSSpec) field.ErrorList { + var errs field.ErrorList + specField := field.NewPath("spec") + errs = append(errs, validateDNSNodePlacement(newspec.NodePlacement, specField.Child("nodePlacement"))...) + errs = append(errs, validateUpstreamResolversUpdate(newspec.UpstreamResolvers, oldspec.UpstreamResolvers, specField.Child("upstreamResolvers"))...) + errs = append(errs, validateServersUpdate(newspec.Servers, oldspec.Servers, specField.Child("servers"))...) + return errs +} + +// validateDNSSpec validates the spec.nodePlacement field of a DNS. +func validateDNSNodePlacement(nodePlacement operatorv1.DNSNodePlacement, fldPath *field.Path) field.ErrorList { + var errs field.ErrorList + if len(nodePlacement.NodeSelector) != 0 { + errs = append(errs, unversionedvalidation.ValidateLabels(nodePlacement.NodeSelector, fldPath.Child("nodeSelector"))...) + } + if len(nodePlacement.Tolerations) != 0 { + errs = append(errs, validateTolerations(nodePlacement.Tolerations, fldPath.Child("tolerations"))...) + } + return errs +} + +// validateTolerations validates a slice of corev1.Toleration. +func validateTolerations(versionedTolerations []corev1.Toleration, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + unversionedTolerations := make([]api.Toleration, len(versionedTolerations)) + for i := range versionedTolerations { + if err := k8s_api_v1.Convert_v1_Toleration_To_core_Toleration(&versionedTolerations[i], &unversionedTolerations[i], nil); err != nil { + allErrors = append(allErrors, field.Invalid(fldPath.Index(i), unversionedTolerations[i], err.Error())) + } + } + allErrors = append(allErrors, apivalidation.ValidateTolerations(unversionedTolerations, fldPath)...) 
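+ // converting to the internal core type above lets the standard pod toleration validation rules be reused for the DNS node placement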
+ return allErrors +} + +// validateUpstreamResolversCreate validates configuration of the Upstream objects when TLS is configured. +func validateUpstreamResolversCreate(upstreamResolvers operatorv1.UpstreamResolvers, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + + errs = append(errs, validateDNSTransportConfig(upstreamResolvers.TransportConfig, fieldPath.Child("transportConfig"))...) + + if upstreamResolvers.TransportConfig.Transport == operatorv1.TLSTransport { + // Transport is TLS so we must check if there are mixed Upstream types. SystemResolveConf is not allowed with TLS. + for i, upstream := range upstreamResolvers.Upstreams { + if upstream.Type == operatorv1.SystemResolveConfType { + errMessage := "SystemResolvConf is not allowed when TLS is configured as the transport" + errs = append(errs, field.Invalid(fieldPath.Child("upstreams").Index(i).Child("type"), upstream.Type, errMessage)) + } + } + } + + return errs +} + +// validateUpstreamResolversUpdate validates configuration of the Upstream objects when TLS is configured. +func validateUpstreamResolversUpdate(newUpstreamResolvers operatorv1.UpstreamResolvers, oldUpstreamResolvers operatorv1.UpstreamResolvers, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + newTransport := newUpstreamResolvers.TransportConfig.Transport + + if !reflect.DeepEqual(newUpstreamResolvers.TransportConfig, oldUpstreamResolvers.TransportConfig) || isKnownTransport(newTransport) { + errs = append(errs, validateUpstreamResolversCreate(newUpstreamResolvers, fieldPath)...) + } + + return errs +} + +func isKnownTransport(transport operatorv1.DNSTransport) bool { + switch transport { + case "", operatorv1.CleartextTransport, operatorv1.TLSTransport: + return true + default: + return false + } + +} + +func validateServersCreate(servers []operatorv1.Server, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + for i, server := range servers { + errs = append(errs, validateDNSTransportConfig(server.ForwardPlugin.TransportConfig, fieldPath.Index(i).Child("forwardPlugin").Child("transportConfig"))...) + } + return errs +} + +func validateServersUpdate(newServers []operatorv1.Server, oldServers []operatorv1.Server, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + for i, newServer := range newServers { + for _, oldServer := range oldServers { + // Use server.Name as the pivot for comparison since a cluster admin could conceivably change the transport + // and/or upstreams, making those insufficient for comparison. + if newServer.Name == oldServer.Name { + // TransportConfig has changed + if !reflect.DeepEqual(newServer.ForwardPlugin.TransportConfig, oldServer.ForwardPlugin.TransportConfig) { + errs = append(errs, validateDNSTransportConfig(newServer.ForwardPlugin.TransportConfig, fieldPath.Index(i).Child("forwardPlugin").Child("transportConfig"))...) + } + } + } + } + return errs +} + +func validateDNSTransportConfig(transportConfig operatorv1.DNSTransportConfig, fieldPath *field.Path) field.ErrorList { + var errs field.ErrorList + var emptyTransportConfig operatorv1.DNSTransportConfig + tlsConfig := transportConfig.TLS + + // No validation is needed on an empty TransportConfig. 
+ if transportConfig == emptyTransportConfig { + return errs + } + + switch transportConfig.Transport { + case "", operatorv1.CleartextTransport: + // Don't allow TLS configuration when using empty or Cleartext + if tlsConfig != nil { + errs = append(errs, field.Invalid(fieldPath.Child("tls"), transportConfig.TLS, "TLS must not be configured when using an empty or cleartext transport")) + } + case operatorv1.TLSTransport: + // When Transport is TLS, there MUST be a ServerName configured. + if tlsConfig == nil || tlsConfig.ServerName == "" { + errs = append(errs, field.Required(fieldPath.Child("tls").Child("serverName"), "transportConfig requires a serverName when transport is TLS")) + } + default: + errs = append(errs, field.Invalid(fieldPath.Child("transport"), transportConfig.Transport, "unknown transport")) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns_test.go new file mode 100644 index 0000000000000..7e557b004447f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns_test.go @@ -0,0 +1,899 @@ +package dns + +import ( + "testing" + + operatorv1 "github.com/openshift/api/operator/v1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// TestFailValidateDNSSpec verifies that validateDNSSpec rejects invalid specs. +func TestFailValidateDNSSpecCreate(t *testing.T) { + errorCases := map[string]struct { + spec operatorv1.DNSSpec + errorType field.ErrorType + errorField string + }{ + "invalid toleration": { + spec: operatorv1.DNSSpec{ + NodePlacement: operatorv1.DNSNodePlacement{ + Tolerations: []corev1.Toleration{{ + Key: "x", + Operator: corev1.TolerationOpExists, + Effect: "NoExcute", + }}, + }, + }, + errorType: field.ErrorTypeNotSupported, + errorField: "spec.nodePlacement.tolerations[0].effect", + }, + "invalid node selector": { + spec: operatorv1.DNSSpec{ + NodePlacement: operatorv1.DNSNodePlacement{ + NodeSelector: map[string]string{ + "-": "foo", + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.nodePlacement.nodeSelector", + }, + "SystemResolveConfType Upstream with TLS configured": { + spec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "Mixed Upstream types with TLS configured": { + spec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "Unknown Transport configured": { + spec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "random", + }, + }, + }, 
+ errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.transportConfig.transport", + }, + "ForwardPlugin configured with TLS and without ServerName": { + spec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "", + }, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.servers[0].forwardPlugin.transportConfig.tls.serverName", + }, + "ForwardPlugin configured with Cleartext and TLS configuration": { + spec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + {Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }}, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.servers[0].forwardPlugin.transportConfig.tls", + }, + } + + for tcName, tc := range errorCases { + errs := validateDNSSpecCreate(tc.spec) + if len(errs) == 0 { + t.Errorf("%q: should have failed but did not", tcName) + } + + for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("%q: expected errors of type '%s', got %v:", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("%q: expected errors in field '%s', got %v:", tcName, tc.errorField, e) + } + } + } +} + +func TestFailValidateDNSSpecUpdate(t *testing.T) { + errorCases := map[string]struct { + oldSpec operatorv1.DNSSpec + newSpec operatorv1.DNSSpec + errorType field.ErrorType + errorField string + }{ + "UpstreamResolvers configured with unknown transport and updated to invalid cleartext config": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + Upstreams: []operatorv1.Upstream{ + { + Type: "foo", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "SystemResolveConfType Upstream with TLS configured": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + Address: "2.2.2.2", + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "UpstreamResolvers configured with unknown transport and updated to 
invalid TLS configuration": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.upstreamResolvers.transportConfig.tls.serverName", + }, + "ForwardPlugin configured with unknown transport and updated to invalid TLS configuration": { + oldSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.servers[0].forwardPlugin.transportConfig.tls.serverName", + }, + "UpstreamResolvers TransportConfig has not changed but Upstreams has changed": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "Servers Transport changed from known (TLS) to unknown type": { + oldSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "unknown-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "unknown-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.servers[0].forwardPlugin.transportConfig.transport", + }, + "UpstreamResolvers Transport changed from known (TLS) to unknown type": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: 
&operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.transportConfig.transport", + }, + "Uniform Upstream types to mixed Upstream types with TLS configured": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.upstreamResolvers.upstreams[0].type", + }, + "UpstreamResolvers TLS configured without ServerName": { + oldSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "", + }, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.upstreamResolvers.transportConfig.tls.serverName", + }, + "ForwardPlugin configured with TLS and without ServerName": { + oldSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "has-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.tls-server.com", + }, + }, + }, + }, + { + Name: "no-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.tls-server.com", + }, + }, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "has-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.tls-server.com", + }, + }, + }, + }, + { + Name: "no-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "", + }, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.servers[1].forwardPlugin.transportConfig.tls.serverName", + }, + "ForwardPlugin 
configured with Cleartext and TLS configuration": { + oldSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + newSpec: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.servers[0].forwardPlugin.transportConfig.tls", + }, + } + + for tcName, tc := range errorCases { + errs := validateDNSSpecUpdate(tc.newSpec, tc.oldSpec) + if len(errs) == 0 { + t.Errorf("%q: should have failed but did not", tcName) + } + + for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("%q: expected errors of type '%s', got %v:", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("%q: expected errors in field '%s', got %v:", tcName, tc.errorField, e) + } + } + } +} + +// TestSucceedValidateDNSSpec verifies that validateDNSSpec accepts valid specs. +func TestSucceedValidateDNSSpecCreate(t *testing.T) { + successCases := map[string]operatorv1.DNSSpec{ + "empty": {}, + "toleration + node selector": { + NodePlacement: operatorv1.DNSNodePlacement{ + NodeSelector: map[string]string{ + "node-role.kubernetes.io/master": "", + }, + Tolerations: []corev1.Toleration{{ + Key: "node-role.kubernetes.io/master", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + }}, + }, + }, + "NetworkResolverType Upstream with TLS configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + }, + }, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + "Mixed Upstream types without TLS configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + }, + }, + "Mixed Upstream types with Cleartext configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + }, + }, + "Mixed Upstream types with nil TransportConfig configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{}, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + }, + }, + "Mixed Upstream types with empty Transport configured": { + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "", + }, + Upstreams: []operatorv1.Upstream{ + { + Type: 
operatorv1.SystemResolveConfType, + }, + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + Port: 7777, + }, + }, + }, + }, + } + + for tcName, s := range successCases { + errs := validateDNSSpecCreate(s) + if len(errs) != 0 { + t.Errorf("%q: expected success, but failed: %v", tcName, errs.ToAggregate().Error()) + } + } +} + +func TestSucceedValidateDNSSpecUpdate(t *testing.T) { + testCases := []struct { + description string + new operatorv1.DNSSpec + old operatorv1.DNSSpec + }{ + { + description: "UpstreamResolvers TransportConfig has not changed but Upstreams have changed", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + Address: "1.1.1.1", + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.NetworkResolverType, + Address: "1.1.1.1", + }, + }, + }, + }, + }, + { + description: "UpstreamResolvers unknown old transport matches unknown new transport", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + }, + }, + }, + { + description: "UpstreamResolvers unknown old transport matches unknown new transport with Upstream changes", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + Upstreams: []operatorv1.Upstream{ + { + Type: operatorv1.SystemResolveConfType, + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "oldtransport", + }, + Upstreams: []operatorv1.Upstream{ + { + Type: "random", + }, + }, + }, + }, + }, + { + description: "UpstreamResolvers TransportConfig has changed", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + { + description: "UpstreamResolvers known transport to empty", + old: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + new: operatorv1.DNSSpec{ + UpstreamResolvers: operatorv1.UpstreamResolvers{ + TransportConfig: operatorv1.DNSTransportConfig{}, + }, + }, + }, + { + description: "Servers TransportConfig has not changed", + old: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + 
Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + }, + }, + }, + { + description: "Compare configuration by server name", + old: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "cleartext-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + { + Name: "unknown-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "unknown-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: "unknown", + }, + }, + }, + { + Name: "cleartext-transport-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.2"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.CleartextTransport, + }, + }, + }, + }, + }, + }, + { + description: "Servers TransportConfig has changed", + old: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{}, + }, + }, + }, + }, + new: operatorv1.DNSSpec{ + Servers: []operatorv1.Server{ + { + Name: "tls-server", + ForwardPlugin: operatorv1.ForwardPlugin{ + Upstreams: []string{"1.1.1.1"}, + TransportConfig: operatorv1.DNSTransportConfig{ + Transport: operatorv1.TLSTransport, + TLS: &operatorv1.DNSOverTLSConfig{ + ServerName: "dns.example.com", + }, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + errs := validateDNSSpecUpdate(tc.new, tc.old) + if len(errs) != 0 { + t.Errorf("%q: expected success, but failed: %v", tc.description, errs.ToAggregate().Error()) + } + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go b/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go new file mode 100644 index 0000000000000..fb3c07f3ff6e3 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go @@ -0,0 +1,93 @@ +package features + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateFeatureGate" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("featuregates"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("FeatureGate"): featureGateV1{}, + }) + }) +} + +func toFeatureGateV1(uncastObj runtime.Object) (*configv1.FeatureGate, 
field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.FeatureGate) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"FeatureGate"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type featureGateV1 struct { +} + +func (featureGateV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toFeatureGateV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + + return allErrs +} + +func (featureGateV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toFeatureGateV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toFeatureGateV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return allErrs +} + +func (featureGateV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toFeatureGateV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toFeatureGateV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go b/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go new file mode 100644 index 0000000000000..9248d469a7b95 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go @@ -0,0 +1,40 @@ +package customresourcevalidation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core/validation" + + configv1 "github.com/openshift/api/config/v1" +) + +func ValidateConfigMapReference(path *field.Path, configMap configv1.ConfigMapNameReference, required bool) field.ErrorList { + return validateConfigMapSecret(path.Child("name"), configMap.Name, required, validation.ValidateConfigMapName) +} + +func ValidateSecretReference(path *field.Path, secret configv1.SecretNameReference, required bool) field.ErrorList { + return validateConfigMapSecret(path.Child("name"), secret.Name, required, validation.ValidateSecretName) +} + +func validateConfigMapSecret(path *field.Path, name string, required bool, validator validation.ValidateNameFunc) field.ErrorList { + if len(name) == 0 { + if required { + return field.ErrorList{field.Required(path, "")} + } + return nil + } + + if valErrs := validator(name, false); len(valErrs) > 0 { + return field.ErrorList{field.Invalid(path, name, strings.Join(valErrs, ", "))} + } + return nil +} + +// RequireNameCluster is a name validation function that requires the name to be cluster. It's handy for config.openshift.io types. 
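+// For example, a config.openshift.io object named "cluster" passes, while any other metadata.name is rejected with "must be cluster".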
+func RequireNameCluster(name string, prefix bool) []string { + if name != "cluster" { + return []string{"must be cluster"} + } + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go b/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go new file mode 100644 index 0000000000000..aa1fb01573bd6 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go @@ -0,0 +1,95 @@ +package image + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateImage" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("images"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Image"): imageV1{}, + }) + }) +} + +func toImageV1(uncastObj runtime.Object) (*configv1.Image, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Image) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Image"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type imageV1 struct { +} + +func (imageV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + + return errs +} + +func (imageV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toImageV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} + +func (imageV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toImageV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go b/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go new file mode 100644 index 0000000000000..8b3f0fe8be9b9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go @@ -0,0 +1,114 @@ +package kubecontrollermanager + +import ( + "context" + "fmt" + "io" + + operatorv1 "github.com/openshift/api/operator/v1" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "operator.openshift.io/ValidateKubeControllerManager" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + operatorv1.Resource("kubecontrollermanagers"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + operatorv1.GroupVersion.WithKind("KubeControllerManager"): kubeControllerManagerV1{}, + }) + }) +} + +func toKubeControllerManager(uncastObj runtime.Object) (*operatorv1.KubeControllerManager, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*operatorv1.KubeControllerManager) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"KubeControllerManager"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"operator.openshift.io/v1"})) + } + + return obj, nil +} + +type kubeControllerManagerV1 struct { +} + +func validateKubeControllerManagerSpecCreate(spec operatorv1.KubeControllerManagerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + // on create, we allow anything + return allErrs +} + +func validateKubeControllerManagerSpecUpdate(spec, oldSpec operatorv1.KubeControllerManagerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + // on update, fail if we go from secure to insecure + if oldSpec.UseMoreSecureServiceCA && !spec.UseMoreSecureServiceCA { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.useMoreSecureServiceCA"), "once enabled, the more secure service-ca.crt cannot be disabled")) + } + + return allErrs +} + +func (kubeControllerManagerV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toKubeControllerManager(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateKubeControllerManagerSpecCreate(obj.Spec)...) 
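+ // creation is intentionally permissive; on update, validateKubeControllerManagerSpecUpdate rejects disabling useMoreSecureServiceCA once it has been enabled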
+ + return allErrs +} + +func (kubeControllerManagerV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toKubeControllerManager(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toKubeControllerManager(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateKubeControllerManagerSpecUpdate(obj.Spec, oldObj.Spec)...) + + return allErrs +} + +func (kubeControllerManagerV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toKubeControllerManager(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toKubeControllerManager(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/network/validate_network_config.go b/openshift-kube-apiserver/admission/customresourcevalidation/network/validate_network_config.go new file mode 100644 index 0000000000000..2fbedb220b7a2 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/network/validate_network_config.go @@ -0,0 +1,128 @@ +package network + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateNetwork" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("networks"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Network"): networkV1{}, + }) + }) +} + +func toNetworkV1(uncastObj runtime.Object) (*configv1.Network, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Network) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Network"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type networkV1 struct { +} + +func validateNetworkServiceNodePortRangeUpdate(obj, oldObj *configv1.Network) *field.Error { + var err error + defaultRange := kubeoptions.DefaultServiceNodePortRange + oldRange := &defaultRange + newRange := &defaultRange + + oldRangeStr := oldObj.Spec.ServiceNodePortRange + if oldRangeStr != "" { + if oldRange, err = utilnet.ParsePortRange(oldRangeStr); err != nil { + return field.Invalid(field.NewPath("spec", 
"serviceNodePortRange"), + oldRangeStr, + fmt.Sprintf("failed to parse the old port range: %v", err)) + } + } + newRangeStr := obj.Spec.ServiceNodePortRange + if newRangeStr != "" { + if newRange, err = utilnet.ParsePortRange(newRangeStr); err != nil { + return field.Invalid(field.NewPath("spec", "serviceNodePortRange"), + newRangeStr, + fmt.Sprintf("failed to parse the new port range: %v", err)) + } + } + if !newRange.Contains(oldRange.Base) || !newRange.Contains(oldRange.Base+oldRange.Size-1) { + return field.Invalid(field.NewPath("spec", "serviceNodePortRange"), + newRangeStr, + fmt.Sprintf("new service node port range %s does not completely cover the previous range %s", newRange, oldRange)) + } + return nil +} + +func (networkV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toNetworkV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + + return allErrs +} + +func (networkV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toNetworkV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toNetworkV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + if err := validateNetworkServiceNodePortRangeUpdate(obj, oldObj); err != nil { + allErrs = append(allErrs, err) + } + + return allErrs +} + +func (networkV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toNetworkV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toNetworkV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile.go b/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile.go new file mode 100644 index 0000000000000..b4b63914f8d71 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile.go @@ -0,0 +1,124 @@ +package node + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +var rejectionScenarios = []struct { + fromProfile configv1.WorkerLatencyProfileType + toProfile configv1.WorkerLatencyProfileType +}{ + {fromProfile: "", toProfile: configv1.LowUpdateSlowReaction}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: ""}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.LowUpdateSlowReaction}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.DefaultUpdateDefaultReaction}, +} + +const PluginName = "config.openshift.io/RestrictExtremeWorkerLatencyProfile" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("nodes"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Node"): configNodeV1{}, + }) + }) +} + +func toConfigNodeV1(uncastObj runtime.Object) (*configv1.Node, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Node) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Node"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type configNodeV1 struct{} + +func validateConfigNodeForExtremeLatencyProfile(obj, oldObj *configv1.Node) *field.Error { + fromProfile := oldObj.Spec.WorkerLatencyProfile + toProfile := obj.Spec.WorkerLatencyProfile + + for _, rejectionScenario := range rejectionScenarios { + if fromProfile == rejectionScenario.fromProfile && toProfile == rejectionScenario.toProfile { + return field.Invalid(field.NewPath("spec", "workerLatencyProfile"), obj.Spec.WorkerLatencyProfile, + fmt.Sprintf( + "cannot update worker latency profile from %q to %q as extreme profile transition is unsupported, please select any other profile with supported transition such as %q", + oldObj.Spec.WorkerLatencyProfile, + obj.Spec.WorkerLatencyProfile, + configv1.MediumUpdateAverageReaction, + ), + ) + } + } + return nil +} + +func (configNodeV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toConfigNodeV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) 
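+ // creation only checks metadata; the workerLatencyProfile transition restrictions are enforced on update via validateConfigNodeForExtremeLatencyProfile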
+ + return allErrs +} + +func (configNodeV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toConfigNodeV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toConfigNodeV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + if err := validateConfigNodeForExtremeLatencyProfile(obj, oldObj); err != nil { + allErrs = append(allErrs, err) + } + + return allErrs +} + +func (configNodeV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConfigNodeV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConfigNodeV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile_test.go new file mode 100644 index 0000000000000..b22c6a2da90a1 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile_test.go @@ -0,0 +1,68 @@ +package node + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + configv1 "github.com/openshift/api/config/v1" +) + +func TestValidateConfigNodeForExtremeLatencyProfile(t *testing.T) { + testCases := []struct { + fromProfile configv1.WorkerLatencyProfileType + toProfile configv1.WorkerLatencyProfileType + shouldReject bool + }{ + // no rejections + {fromProfile: "", toProfile: "", shouldReject: false}, + {fromProfile: "", toProfile: configv1.DefaultUpdateDefaultReaction, shouldReject: false}, + {fromProfile: "", toProfile: configv1.MediumUpdateAverageReaction, shouldReject: false}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: "", shouldReject: false}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.DefaultUpdateDefaultReaction, shouldReject: false}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.MediumUpdateAverageReaction, shouldReject: false}, + {fromProfile: configv1.MediumUpdateAverageReaction, toProfile: "", shouldReject: false}, + {fromProfile: configv1.MediumUpdateAverageReaction, toProfile: configv1.DefaultUpdateDefaultReaction, shouldReject: false}, + {fromProfile: configv1.MediumUpdateAverageReaction, toProfile: configv1.MediumUpdateAverageReaction, shouldReject: false}, + {fromProfile: configv1.MediumUpdateAverageReaction, toProfile: configv1.LowUpdateSlowReaction, shouldReject: false}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.MediumUpdateAverageReaction, shouldReject: false}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.LowUpdateSlowReaction, shouldReject: false}, + + // rejections + {fromProfile: "", toProfile: configv1.LowUpdateSlowReaction, shouldReject: true}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.LowUpdateSlowReaction, shouldReject: true}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: "", 
shouldReject: true}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.DefaultUpdateDefaultReaction, shouldReject: true}, + } + + for _, testCase := range testCases { + shouldStr := "should not be" + if testCase.shouldReject { + shouldStr = "should be" + } + testCaseName := fmt.Sprintf("update from profile %s to %s %s rejected", testCase.fromProfile, testCase.toProfile, shouldStr) + t.Run(testCaseName, func(t *testing.T) { + // config node objects + oldObject := configv1.Node{ + Spec: configv1.NodeSpec{ + WorkerLatencyProfile: testCase.fromProfile, + }, + } + newObject := configv1.Node{ + Spec: configv1.NodeSpec{ + WorkerLatencyProfile: testCase.toProfile, + }, + } + + fieldErr := validateConfigNodeForExtremeLatencyProfile(&newObject, &oldObject) + assert.Equal(t, testCase.shouldReject, fieldErr != nil, "latency profile from %q to %q %s rejected", testCase.fromProfile, testCase.toProfile, shouldStr) + + if testCase.shouldReject { + assert.Equal(t, "spec.workerLatencyProfile", fieldErr.Field, "field name for latency profile should be spec.workerLatencyProfile") + assert.Contains(t, fieldErr.Detail, testCase.fromProfile, "error message should contain %q", testCase.fromProfile) + assert.Contains(t, fieldErr.Detail, testCase.toProfile, "error message should contain %q", testCase.toProfile) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go new file mode 100644 index 0000000000000..126a53bb9ac1c --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go @@ -0,0 +1,32 @@ +package oauth + +import ( + kvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + netutils "k8s.io/utils/net" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func isValidHostname(hostname string) bool { + return len(kvalidation.IsDNS1123Subdomain(hostname)) == 0 || netutils.ParseIPSloppy(hostname) != nil +} + +func ValidateRemoteConnectionInfo(remoteConnectionInfo configv1.OAuthRemoteConnectionInfo, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(remoteConnectionInfo.URL) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("url"), "")) + } else { + _, urlErrs := validation.ValidateSecureURL(remoteConnectionInfo.URL, fldPath.Child("url")) + allErrs = append(allErrs, urlErrs...) + } + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fldPath.Child("ca"), remoteConnectionInfo.CA, false)...) + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fldPath.Child("tlsClientCert"), remoteConnectionInfo.TLSClientCert, false)...) + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fldPath.Child("tlsClientKey"), remoteConnectionInfo.TLSClientKey, false)...)
+ + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go new file mode 100644 index 0000000000000..2ae0b45254a14 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go @@ -0,0 +1,69 @@ +package oauth + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateGitHubIdentityProvider(provider *configv1.GitHubIdentityProvider, mappingMethod configv1.MappingMethodType, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath.Child("provider"))...) + + if len(provider.Teams) > 0 && len(provider.Organizations) > 0 { + errs = append(errs, field.Invalid(fieldPath.Child("organizations"), provider.Organizations, "specify organizations or teams, not both")) + errs = append(errs, field.Invalid(fieldPath.Child("teams"), provider.Teams, "specify organizations or teams, not both")) + } + + // only check that there are some teams/orgs if not GitHub Enterprise Server + if len(provider.Hostname) == 0 && len(provider.Teams) == 0 && len(provider.Organizations) == 0 && mappingMethod != configv1.MappingMethodLookup { + errs = append(errs, field.Invalid(fieldPath, nil, "one of organizations or teams must be specified unless hostname is set or lookup is used")) + } + for i, organization := range provider.Organizations { + if strings.Contains(organization, "/") { + errs = append(errs, field.Invalid(fieldPath.Child("organizations").Index(i), organization, "cannot contain /")) + } + if len(organization) == 0 { + errs = append(errs, field.Required(fieldPath.Child("organizations").Index(i), "cannot be empty")) + } + } + for i, team := range provider.Teams { + if split := strings.Split(team, "/"); len(split) != 2 { + errs = append(errs, field.Invalid(fieldPath.Child("teams").Index(i), team, "must be in the format <org>/<team>")) + } else if org, t := split[0], split[1]; len(org) == 0 || len(t) == 0 { + errs = append(errs, field.Invalid(fieldPath.Child("teams").Index(i), team, "must be in the format <org>/<team>")) + } + } + + if hostname := provider.Hostname; len(hostname) != 0 { + hostnamePath := fieldPath.Child("hostname") + + if hostname == "github.com" || strings.HasSuffix(hostname, ".github.com") { + errs = append(errs, field.Invalid(hostnamePath, hostname, "cannot equal [*.]github.com")) + } + + if !isValidHostname(hostname) { + errs = append(errs, field.Invalid(hostnamePath, hostname, "must be a valid DNS subdomain or IP address")) + } + } + + if caFile := provider.CA; len(caFile.Name) != 0 { + caPath := fieldPath.Child("ca") + + errs = append(errs, crvalidation.ValidateConfigMapReference(caPath, caFile, true)...)
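+ // a CA reference is only meaningful for GitHub Enterprise Server (i.e. when hostname is set); the check below rejects a CA when hostname is empty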
+ + if len(provider.Hostname) == 0 { + errs = append(errs, field.Invalid(caPath, caFile, "cannot be specified when hostname is empty")) + } + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go new file mode 100644 index 0000000000000..10102f24e45e8 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go @@ -0,0 +1,249 @@ +package oauth + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +func TestValidateGitHubIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GitHubIdentityProvider + mappingMethod configv1.MappingMethodType + fieldPath *field.Path + } + tests := []struct { + name string + args args + errors field.ErrorList + }{ + { + name: "cannot use GH as hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "github.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "github.com", Detail: "cannot equal [*.]github.com"}, + }, + }, + { + name: "cannot use GH subdomain as hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "foo.github.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "foo.github.com", Detail: "cannot equal [*.]github.com"}, + }, + }, + { + name: "valid domain hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "company.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + }, + { + name: "valid ip hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "192.168.8.1", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + }, + { + name: "invalid ip hostname with port", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "192.168.8.1:8080", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "192.168.8.1:8080", Detail: "must be a valid DNS subdomain or IP address"}, + }, + }, + { + name: "invalid domain hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "google-.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, 
+ }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "google-.com", Detail: "must be a valid DNS subdomain or IP address"}, + }, + }, + { + name: "invalid name in ca ref and no hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{Name: "ca&config-map"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "ca.name", BadValue: "ca&config-map", Detail: "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"}, + {Type: field.ErrorTypeInvalid, Field: "ca", BadValue: configv1.ConfigMapNameReference{Name: "ca&config-map"}, Detail: "cannot be specified when hostname is empty"}, + }, + }, + { + name: "valid ca and hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "mo.co", + CA: configv1.ConfigMapNameReference{Name: "ca-config-map"}, + }, + mappingMethod: "", + }, + }, + { + name: "GitHub requires client ID and secret", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "", + ClientSecret: configv1.SecretNameReference{}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: "provider.clientID", BadValue: "", Detail: ""}, + {Type: field.ErrorTypeRequired, Field: "provider.clientSecret.name", BadValue: "", Detail: ""}, + }, + }, + { + name: "GitHub warns when not constrained to organizations or teams without lookup", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "", BadValue: nil, Detail: "one of organizations or teams must be specified unless hostname is set or lookup is used"}, + }, + }, + { + name: "GitHub does not warn when not constrained to organizations or teams with lookup", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "lookup", + }, + }, + { + name: "invalid cannot specific both organizations and teams", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: []string{"org1/team1"}, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "organizations", BadValue: []string{"org1"}, Detail: "specify organizations or teams, not both"}, + {Type: field.ErrorTypeInvalid, Field: "teams", BadValue: []string{"org1/team1"}, 
Detail: "specify organizations or teams, not both"}, + }, + }, + { + name: "invalid team format", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: []string{"org1/team1", "org2/not/team2", "org3//team3", "", "org4/team4"}, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "teams[1]", BadValue: "org2/not/team2", Detail: "must be in the format /"}, + {Type: field.ErrorTypeInvalid, Field: "teams[2]", BadValue: "org3//team3", Detail: "must be in the format /"}, + {Type: field.ErrorTypeInvalid, Field: "teams[3]", BadValue: "", Detail: "must be in the format /"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ValidateGitHubIdentityProvider(tt.args.provider, tt.args.mappingMethod, tt.args.fieldPath) + if tt.errors == nil && len(got) == 0 { + return + } + if !reflect.DeepEqual(got, tt.errors) { + t.Errorf("ValidateGitHubIdentityProvider() = %v, want %v", got, tt.errors) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go new file mode 100644 index 0000000000000..ea9fda2ab4d8c --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go @@ -0,0 +1,26 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateGitLabIdentityProvider(provider *configv1.GitLabIdentityProvider, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if provider == nil { + allErrs = append(allErrs, field.Required(fieldPath, "")) + return allErrs + } + + allErrs = append(allErrs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) + + _, urlErrs := validation.ValidateSecureURL(provider.URL, fieldPath.Child("url")) + allErrs = append(allErrs, urlErrs...) + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.CA, false)...) 
+ + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go new file mode 100644 index 0000000000000..9ce73cdc731ee --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go @@ -0,0 +1,104 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func gitlabIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeGitLab, + GitLab: &configv1.GitLabIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-gitlab-secret"}, + URL: "https://thisgitlabinstancerighthere.com", + CA: configv1.ConfigMapNameReference{Name: "letsencrypt-for-gitlab.instance"}, + }, + } +} + +func TestValidateGitLabIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GitLabIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "insecure URL", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "http://anyonecanseemenow.com", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "http://anyonecanseemenow.com", "must use https scheme"), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + URL: "https://privategitlab.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "invalid CA ref name", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "https://anyonecanseemenow.com", + CA: configv1.ConfigMapNameReference{Name: "veryBadRefName?:("}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "veryBadRefName?:(", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "minimal passing case", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "https://anyonecanseemenow.com", + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated case", + args: args{ + provider: gitlabIDP().GitLab, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateGitLabIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateGitLabIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go new file mode 100644 index 0000000000000..481b162cf756b --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go @@ -0,0 +1,23 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 
"github.com/openshift/api/config/v1" +) + +func ValidateGoogleIdentityProvider(provider *configv1.GoogleIdentityProvider, mappingMethod configv1.MappingMethodType, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) + + if len(provider.HostedDomain) == 0 && mappingMethod != configv1.MappingMethodLookup { + errs = append(errs, field.Invalid(fieldPath.Child("hostedDomain"), nil, "hostedDomain must be specified unless lookup is used")) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go new file mode 100644 index 0000000000000..88306d0f1919f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go @@ -0,0 +1,90 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func googleIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeGoogle, + Google: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + HostedDomain: "myprivategoogledomain.com", + }, + } +} + +func TestValidateGoogleIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GoogleIdentityProvider + mappingMethod configv1.MappingMethodType + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + HostedDomain: "myprivategoogledomain.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "no hosted domain with mapping method != 'lookup'", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + }, + mappingMethod: configv1.MappingMethodClaim, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("hostedDomain"), nil, "hostedDomain must be specified unless lookup is used"), + }, + }, + { + name: "no hosted domain with mapping method == 'lookup'", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + }, + mappingMethod: configv1.MappingMethodLookup, + }, + want: field.ErrorList{}, + }, + { + name: "working example", + args: args{ + provider: googleIDP().Google, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateGoogleIdentityProvider(tt.args.provider, tt.args.mappingMethod, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateGoogleIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go 
b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go new file mode 100644 index 0000000000000..86e8158c95799 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go @@ -0,0 +1,215 @@ +package oauth + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + pointerutil "k8s.io/utils/pointer" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const ( + // MinimumInactivityTimeoutSeconds defines the smallest value allowed + // for AccessTokenInactivityTimeoutSeconds. + // It also defines the ticker interval for the token update routine, + // since MinimumInactivityTimeoutSeconds / 3 is used there. + MinimumInactivityTimeoutSeconds = 5 * 60 +) + +var validMappingMethods = sets.NewString( + string(configv1.MappingMethodLookup), + string(configv1.MappingMethodClaim), + string(configv1.MappingMethodAdd), +) + +func validateOAuthSpec(spec configv1.OAuthSpec) field.ErrorList { + errs := field.ErrorList{} + specPath := field.NewPath("spec") + + providerNames := sets.NewString() + + challengeIssuingIdentityProviders := []string{} + challengeRedirectingIdentityProviders := []string{} + + // TODO move to ValidateIdentityProviders (plural) + for i, identityProvider := range spec.IdentityProviders { + if isUsedAsChallenger(identityProvider.IdentityProviderConfig) { + // TODO fix CAO to properly let you use request header and other challengers by disabling the other ones on the CLI + // RequestHeaderIdentityProvider is special: it can only react to challenge clients by redirecting them. + // Make sure we don't have more than a single redirector, and don't have a mix of challenge issuers and redirectors. + if identityProvider.Type == configv1.IdentityProviderTypeRequestHeader { + challengeRedirectingIdentityProviders = append(challengeRedirectingIdentityProviders, identityProvider.Name) + } else { + challengeIssuingIdentityProviders = append(challengeIssuingIdentityProviders, identityProvider.Name) + } + } + + identityProviderPath := specPath.Child("identityProviders").Index(i) + errs = append(errs, ValidateIdentityProvider(identityProvider, identityProviderPath)...)
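+ // provider names are collected below so that duplicates across the identityProviders list can be rejected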
+ + if len(identityProvider.Name) > 0 { + if providerNames.Has(identityProvider.Name) { + errs = append(errs, field.Invalid(identityProviderPath.Child("name"), identityProvider.Name, "must have a unique name")) + } + providerNames.Insert(identityProvider.Name) + } + } + + if len(challengeRedirectingIdentityProviders) > 1 { + errs = append(errs, field.Invalid(specPath.Child("identityProviders"), "", fmt.Sprintf("only one identity provider can redirect clients requesting an authentication challenge, found: %v", strings.Join(challengeRedirectingIdentityProviders, ", ")))) + } + if len(challengeRedirectingIdentityProviders) > 0 && len(challengeIssuingIdentityProviders) > 0 { + errs = append(errs, field.Invalid(specPath.Child("identityProviders"), "", fmt.Sprintf( + "cannot mix providers that redirect clients requesting auth challenges (%s) with providers issuing challenges to those clients (%s)", + strings.Join(challengeRedirectingIdentityProviders, ", "), + strings.Join(challengeIssuingIdentityProviders, ", "), + ))) + } + + // TODO move to ValidateTokenConfig + timeout := spec.TokenConfig.AccessTokenInactivityTimeout + if timeout != nil && timeout.Seconds() < MinimumInactivityTimeoutSeconds { + errs = append(errs, field.Invalid( + specPath.Child("tokenConfig", "accessTokenInactivityTimeout"), timeout, + fmt.Sprintf("the minimum acceptable token timeout value is %d seconds", + MinimumInactivityTimeoutSeconds))) + } + + if tokenMaxAge := spec.TokenConfig.AccessTokenMaxAgeSeconds; tokenMaxAge < 0 { + errs = append(errs, field.Invalid(specPath.Child("tokenConfig", "accessTokenMaxAgeSeconds"), tokenMaxAge, "must be a positive integer or 0")) + } + + // TODO move to ValidateTemplates + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "login"), spec.Templates.Login, false)...) + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "providerSelection"), spec.Templates.ProviderSelection, false)...) + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "error"), spec.Templates.Error, false)...) + + return errs +} + +// if you change this, update the peer in user validation. also, don't change this. 
+func validateIdentityProviderName(name string) []string { + if reasons := path.ValidatePathSegmentName(name, false); len(reasons) != 0 { + return reasons + } + + if strings.Contains(name, ":") { + return []string{`may not contain ":"`} + } + return nil +} + +func ValidateIdentityProvider(identityProvider configv1.IdentityProvider, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + if len(identityProvider.Name) == 0 { + errs = append(errs, field.Required(fldPath.Child("name"), "")) + } else if reasons := validateIdentityProviderName(identityProvider.Name); len(reasons) != 0 { + errs = append(errs, field.Invalid(fldPath.Child("name"), identityProvider.Name, strings.Join(reasons, ", "))) + } + + if len(identityProvider.MappingMethod) > 0 && !validMappingMethods.Has(string(identityProvider.MappingMethod)) { + errs = append(errs, field.NotSupported(fldPath.Child("mappingMethod"), identityProvider.MappingMethod, validMappingMethods.List())) + } + + provider := identityProvider.IdentityProviderConfig + // create a copy of the provider to simplify checking that only one IdPs is set + providerCopy := provider.DeepCopy() + switch provider.Type { + case "": + errs = append(errs, field.Required(fldPath.Child("type"), "")) + + case configv1.IdentityProviderTypeRequestHeader: + errs = append(errs, ValidateRequestHeaderIdentityProvider(provider.RequestHeader, fldPath)...) + providerCopy.RequestHeader = nil + + case configv1.IdentityProviderTypeBasicAuth: + // TODO move to ValidateBasicAuthIdentityProvider for consistency + if provider.BasicAuth == nil { + errs = append(errs, field.Required(fldPath.Child("basicAuth"), "")) + } else { + errs = append(errs, ValidateRemoteConnectionInfo(provider.BasicAuth.OAuthRemoteConnectionInfo, fldPath.Child("basicAuth"))...) + } + providerCopy.BasicAuth = nil + + case configv1.IdentityProviderTypeHTPasswd: + // TODO move to ValidateHTPasswdIdentityProvider for consistency + if provider.HTPasswd == nil { + errs = append(errs, field.Required(fldPath.Child("htpasswd"), "")) + } else { + errs = append(errs, crvalidation.ValidateSecretReference(fldPath.Child("htpasswd", "fileData"), provider.HTPasswd.FileData, true)...) + } + providerCopy.HTPasswd = nil + + case configv1.IdentityProviderTypeLDAP: + errs = append(errs, ValidateLDAPIdentityProvider(provider.LDAP, fldPath.Child("ldap"))...) + providerCopy.LDAP = nil + + case configv1.IdentityProviderTypeKeystone: + errs = append(errs, ValidateKeystoneIdentityProvider(provider.Keystone, fldPath.Child("keystone"))...) + providerCopy.Keystone = nil + + case configv1.IdentityProviderTypeGitHub: + errs = append(errs, ValidateGitHubIdentityProvider(provider.GitHub, identityProvider.MappingMethod, fldPath.Child("github"))...) + providerCopy.GitHub = nil + + case configv1.IdentityProviderTypeGitLab: + errs = append(errs, ValidateGitLabIdentityProvider(provider.GitLab, fldPath.Child("gitlab"))...) + providerCopy.GitLab = nil + + case configv1.IdentityProviderTypeGoogle: + errs = append(errs, ValidateGoogleIdentityProvider(provider.Google, identityProvider.MappingMethod, fldPath.Child("google"))...) + providerCopy.Google = nil + + case configv1.IdentityProviderTypeOpenID: + errs = append(errs, ValidateOpenIDIdentityProvider(provider.OpenID, fldPath.Child("openID"))...) 
+ providerCopy.OpenID = nil + + default: + errs = append(errs, field.Invalid(fldPath.Child("type"), identityProvider.Type, "not a valid provider type")) + } + + if !pointerutil.AllPtrFieldsNil(providerCopy) { + errs = append(errs, field.Invalid(fldPath, identityProvider.IdentityProviderConfig, "only one identity provider can be configured in single object")) + } + + return errs +} + +func ValidateOAuthIdentityProvider(clientID string, clientSecretRef configv1.SecretNameReference, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(clientID) == 0 { + allErrs = append(allErrs, field.Required(fieldPath.Child("clientID"), "")) + } + + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fieldPath.Child("clientSecret"), clientSecretRef, true)...) + + return allErrs +} + +func isUsedAsChallenger(idp configv1.IdentityProviderConfig) bool { + // TODO this is wrong and needs to be more dynamic... + switch idp.Type { + // whitelist all the IdPs that we set `UseAsChallenger: true` in cluster-authentication-operator + case configv1.IdentityProviderTypeBasicAuth, configv1.IdentityProviderTypeGitLab, + configv1.IdentityProviderTypeHTPasswd, configv1.IdentityProviderTypeKeystone, + configv1.IdentityProviderTypeLDAP, + // guard open ID for now because it *could* have challenge in the future + configv1.IdentityProviderTypeOpenID: + return true + case configv1.IdentityProviderTypeRequestHeader: + if idp.RequestHeader == nil { + // this is an error reported elsewhere + return false + } + return len(idp.RequestHeader.ChallengeURL) > 0 + default: + return false + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go new file mode 100644 index 0000000000000..af0aa6cfa4d82 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go @@ -0,0 +1,429 @@ +package oauth + +import ( + "fmt" + "reflect" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +const wrongConfigMapSecretErrMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 
'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')" + +func htpasswdIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeHTPasswd, + HTPasswd: &configv1.HTPasswdIdentityProvider{ + FileData: configv1.SecretNameReference{ + Name: "innocent.llama", + }, + }, + } +} + +func TestValidateOAuthSpec(t *testing.T) { + doubledIdPs := configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeHTPasswd, + HTPasswd: &configv1.HTPasswdIdentityProvider{ + FileData: configv1.SecretNameReference{ + Name: "innocent.llama", + }, + }, + GitLab: &configv1.GitLabIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-gitlab-secret"}, + URL: "https://thisgitlabinstancerighthere.com", + CA: configv1.ConfigMapNameReference{Name: "letsencrypt-for-gitlab.instance"}, + }, + } + + type args struct { + spec configv1.OAuthSpec + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty object", + args: args{ + spec: configv1.OAuthSpec{}, + }, + }, + { + name: "more than one challenge issuing IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "htpasswd", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "ldap", + IdentityProviderConfig: ldapIDP(), + }, + }, + }, + }, + }, + { + name: "more than one challenge redirecting IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "sso1", + IdentityProviderConfig: requestHeaderIDP(true, true), + }, + { + Name: "sso2", + IdentityProviderConfig: requestHeaderIDP(true, false), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders"), "", "only one identity provider can redirect clients requesting an authentication challenge, found: sso1, sso2"), + }, + }, + { + name: "mixing challenge issuing and redirecting IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "sso", + IdentityProviderConfig: requestHeaderIDP(true, false), + }, + { + Name: "ldap", + IdentityProviderConfig: ldapIDP(), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders"), "", "cannot mix providers that redirect clients requesting auth challenges (sso) with providers issuing challenges to those clients (ldap)"), + }, + }, + { + name: "two IdPs with the same name", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "aname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "bname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "aname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "cname", + IdentityProviderConfig: htpasswdIDP(), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders").Index(2).Child("name"), "aname", "must have a unique name"), + }, + }, + { + name: "negative token inactivity timeout", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: -50 * time.Second}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenInactivityTimeout"), metav1.Duration{Duration: -50 * time.Second}, fmt.Sprintf("the minimum acceptable token timeout value is 
%d seconds", MinimumInactivityTimeoutSeconds)), + }, + }, + { + name: "positive token inactivity timeout", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: 32578 * time.Second}, + }, + }, + }, + }, + { + name: "zero token inactivity timeout", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: 0}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenInactivityTimeout"), metav1.Duration{Duration: 0 * time.Second}, fmt.Sprintf("the minimum acceptable token timeout value is %d seconds", MinimumInactivityTimeoutSeconds)), + }, + }, + { + name: "token inactivity timeout lower than the api constant minimum", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: 250 * time.Second}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenInactivityTimeout"), metav1.Duration{Duration: 250 * time.Second}, fmt.Sprintf("the minimum acceptable token timeout value is %d seconds", MinimumInactivityTimeoutSeconds)), + }, + }, + { + name: "negative token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: -20, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenMaxAgeSeconds"), -20, "must be a positive integer or 0"), + }, + }, + { + name: "positive token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 213123, + }, + }, + }, + }, + { + name: "zero token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 0, + }, + }, + }, + }, + { + name: "template names all messed up", + args: args{ + spec: configv1.OAuthSpec{ + Templates: configv1.OAuthTemplates{ + Login: configv1.SecretNameReference{Name: "/this/is/wrong.html"}, + ProviderSelection: configv1.SecretNameReference{Name: "also_wrong"}, + Error: configv1.SecretNameReference{Name: "the&very+woRst"}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "templates", "login", "name"), "/this/is/wrong.html", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("spec", "templates", "providerSelection", "name"), "also_wrong", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("spec", "templates", "error", "name"), "the&very+woRst", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "everything set properly", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "some_httpasswd", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "sso", + IdentityProviderConfig: requestHeaderIDP(false, true), + }, + }, + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeout: &metav1.Duration{Duration: 300 * time.Second}, + AccessTokenMaxAgeSeconds: 216000, + }, + Templates: configv1.OAuthTemplates{ + Login: configv1.SecretNameReference{Name: "my-login-template"}, + ProviderSelection: configv1.SecretNameReference{Name: "provider-selection.template"}, + Error: configv1.SecretNameReference{Name: "a.template-with-error"}, + }, + }, + }, + }, + { + name: "two different IdPs in one object", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + 
{ + Name: "bad_bad_config", + IdentityProviderConfig: doubledIdPs, + }, + }, + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 216000, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders").Index(0), doubledIdPs, "only one identity provider can be configured in single object"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := validateOAuthSpec(tt.args.spec) + + // DeepEqual does not seem to be working well here + var failedCheck bool + if len(got) != len(tt.want) { + failedCheck = true + } else { + // Check all the errors + for i := range got { + if got[i].Error() != tt.want[i].Error() { + failedCheck = true + break + } + } + } + + if failedCheck { + t.Errorf("validateOAuthSpec() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestValidateIdentityProvider(t *testing.T) { + type args struct { + identityProvider configv1.IdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty provider needs at least name and type in provider", + args: args{ + identityProvider: configv1.IdentityProvider{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("name"), ""), + field.Required(field.NewPath("type"), ""), + }, + }, + { + name: "unknown type name", + args: args{ + identityProvider: configv1.IdentityProvider{ + Name: "providingProvider", + IdentityProviderConfig: configv1.IdentityProviderConfig{ + Type: "someText", + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("type"), "someText", "not a valid provider type"), + }, + }, + { + name: "basic provider", + args: args{ + identityProvider: configv1.IdentityProvider{ + Name: "providingProvider", + IdentityProviderConfig: htpasswdIDP(), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ValidateIdentityProvider(tt.args.identityProvider, tt.args.fldPath) + // DeepEqual does not seem to be working well here + var failedCheck bool + if len(got) != len(tt.want) { + failedCheck = true + } else { + // Check all the errors + for i := range got { + if got[i].Error() != tt.want[i].Error() { + failedCheck = true + break + } + } + } + + if failedCheck { + t.Errorf("ValidateIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestValidateOAuthIdentityProvider(t *testing.T) { + type args struct { + clientID string + clientSecretRef configv1.SecretNameReference + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty client ID and secret ref", + args: args{ + clientID: "", + clientSecretRef: configv1.SecretNameReference{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "improper client secret refname", + args: args{ + clientID: "thisBeClient", + clientSecretRef: configv1.SecretNameReference{Name: "terribleName_forASecret"}, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("clientSecret", "name"), "terribleName_forASecret", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "working example", + args: args{ + clientID: "thisBeClient", + clientSecretRef: configv1.SecretNameReference{Name: "client-secret-hideout"}, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateOAuthIdentityProvider(tt.args.clientID, tt.args.clientSecretRef, 
tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateOAuthIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go new file mode 100644 index 0000000000000..e1bf7cb76aed2 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go @@ -0,0 +1,23 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +func ValidateKeystoneIdentityProvider(provider *configv1.KeystoneIdentityProvider, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fldPath, "")) + return errs + } + + errs = append(errs, ValidateRemoteConnectionInfo(provider.OAuthRemoteConnectionInfo, fldPath)...) + + if len(provider.DomainName) == 0 { + errs = append(errs, field.Required(field.NewPath("domainName"), "")) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go new file mode 100644 index 0000000000000..6ccdddb7b9ebf --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go @@ -0,0 +1,96 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func keystoneIdP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeKeystone, + Keystone: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "https://somewhere.over.rainbow/ks", + CA: configv1.ConfigMapNameReference{Name: "govt-ca"}, + }, + DomainName: "production", + }, + } +} + +func TestValidateKeystoneIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.KeystoneIdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "empty url", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "", + }, + DomainName: "production", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("url"), ""), + }, + }, + { + name: "http url", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "http://foo", + }, + DomainName: "production", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "http://foo", "must use https scheme"), + }, + }, + { + name: "missing domain name", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "https://keystone.openstack.nasa.gov/", + }, + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("domainName"), ""), + }, + }, + { + name: "working provider", + args: args{ + provider: keystoneIdP().Keystone, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := 
ValidateKeystoneIdentityProvider(tt.args.provider, tt.args.fldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateKeystoneIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go new file mode 100644 index 0000000000000..b5f40060b9cc9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go @@ -0,0 +1,66 @@ +package oauth + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/security/ldaputil" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateLDAPIdentityProvider(provider *configv1.LDAPIdentityProvider, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + if provider == nil { + errs = append(errs, field.Required(fldPath, "")) + return errs + } + + errs = append(errs, validateLDAPClientConfig(provider.URL, provider.BindDN, provider.BindPassword.Name, provider.CA.Name, provider.Insecure, fldPath)...) + errs = append(errs, crvalidation.ValidateSecretReference(fldPath.Child("bindPassword"), provider.BindPassword, false)...) + errs = append(errs, crvalidation.ValidateConfigMapReference(fldPath.Child("ca"), provider.CA, false)...) + + // At least one attribute to use as the user id is required + if len(provider.Attributes.ID) == 0 { + errs = append(errs, field.Invalid(fldPath.Child("attributes", "id"), "[]", "at least one id attribute is required (LDAP standard identity attribute is 'dn')")) + } + + return errs +} + +// TODO clean this up +func validateLDAPClientConfig(url, bindDN, bindPasswordRef, CA string, insecure bool, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + // Make sure bindDN and bindPassword are both set, or both unset + // Both unset means an anonymous bind is used for search (https://tools.ietf.org/html/rfc4513#section-5.1.1) + // Both set means the name/password simple bind is used for search (https://tools.ietf.org/html/rfc4513#section-5.1.3) + if (len(bindDN) == 0) != (len(bindPasswordRef) == 0) { + errs = append(errs, field.Invalid(fldPath.Child("bindDN"), bindDN, "bindDN and bindPassword must both be specified, or both be empty")) + errs = append(errs, field.Invalid(fldPath.Child("bindPassword").Child("name"), bindPasswordRef, "bindDN and bindPassword must both be specified, or both be empty")) + } + + if len(url) == 0 { + errs = append(errs, field.Required(fldPath.Child("url"), "")) + return errs + } + + u, err := ldaputil.ParseURL(url) + if err != nil { + errs = append(errs, field.Invalid(fldPath.Child("url"), url, err.Error())) + return errs + } + + if insecure { + if u.Scheme == ldaputil.SchemeLDAPS { + errs = append(errs, field.Invalid(fldPath.Child("url"), url, fmt.Sprintf("Cannot use %s scheme with insecure=true", u.Scheme))) + } + if len(CA) > 0 { + errs = append(errs, field.Invalid(fldPath.Child("ca"), CA, "Cannot specify a ca with insecure=true")) + } + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go new file mode 100644 index 0000000000000..85daa9e182541 --- /dev/null +++ 
b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go @@ -0,0 +1,101 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func ldapIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeLDAP, + LDAP: &configv1.LDAPIdentityProvider{ + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"memberUid"}, + }, + BindDN: "uid=readallaccount,ou=privileged,dc=coolpeople,dc=se", + BindPassword: configv1.SecretNameReference{ + Name: "ldap-secret", + }, + CA: configv1.ConfigMapNameReference{Name: "ldap-ca-configmap"}, + Insecure: false, + URL: "ldaps://ldapinstance.corporate.coolpeople.se/ou=Groups,dc=coolpeople,dc=se?memberUid?sub", + }, + } +} + +func TestValidateLDAPIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.LDAPIdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "invalid bindPassword ref name, missing ID", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + BindPassword: configv1.SecretNameReference{Name: "bad_refname"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("bindDN"), "", "bindDN and bindPassword must both be specified, or both be empty"), + field.Invalid(field.NewPath("bindPassword", "name"), "bad_refname", "bindDN and bindPassword must both be specified, or both be empty"), + field.Required(field.NewPath("url"), ""), + field.Invalid(field.NewPath("bindPassword", "name"), "bad_refname", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("attributes", "id"), "[]", "at least one id attribute is required (LDAP standard identity attribute is 'dn')"), + }, + }, + { + name: "invalid url", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + URL: "https://foo", + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"uid"}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "https://foo", `invalid scheme "https"`), + }, + }, + { + name: "minimal passing provider", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + URL: "ldap://foo", + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"uid"}, + }, + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: ldapIDP().LDAP, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateLDAPIdentityProvider(tt.args.provider, tt.args.fldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateLDAPIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go new file mode 100644 index 0000000000000..eec9bf57532e2 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go @@ -0,0 +1,111 @@ +package oauth + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + crvalidation 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateOAuth" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("oauths").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("OAuth"): oauthV1{}, + }) + }) +} + +func toOAuthV1(uncastObj runtime.Object) (*configv1.OAuth, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.OAuth) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"OAuth"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type oauthV1 struct{} + +func (oauthV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, crvalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthSpecCreate(obj.Spec)...) + + return errs +} + +func (oauthV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toOAuthV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (oauthV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toOAuthV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthStatus(obj.Status)...) 
+ + return errs +} + +func validateOAuthSpecCreate(spec configv1.OAuthSpec) field.ErrorList { + return validateOAuthSpec(spec) +} + +func validateOAuthSpecUpdate(newspec, oldspec configv1.OAuthSpec) field.ErrorList { + return validateOAuthSpec(newspec) +} + +func validateOAuthStatus(status configv1.OAuthStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go new file mode 100644 index 0000000000000..41d8c35db3f91 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go @@ -0,0 +1,54 @@ +package oauth + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateOpenIDIdentityProvider(provider *configv1.OpenIDIdentityProvider, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if provider == nil { + allErrs = append(allErrs, field.Required(fieldPath, "")) + return allErrs + } + + allErrs = append(allErrs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) + + if provider.Issuer != strings.TrimRight(provider.Issuer, "/") { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("issuer"), provider.Issuer, "cannot end with '/'")) + } + + // The specs are a bit ambiguous on whether this must or needn't be an https:// + // scheme, but they do require (MUST) TLS support for the discovery and we do + // require this in our API description + // https://openid.net/specs/openid-connect-discovery-1_0.html#TLSRequirements + url, issuerErrs := validation.ValidateSecureURL(provider.Issuer, fieldPath.Child("issuer")) + allErrs = append(allErrs, issuerErrs...) + if len(url.RawQuery) > 0 || len(url.Fragment) > 0 { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("issuer"), provider.Issuer, "must not specify query or fragment component")) + } + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.CA, false)...) + + for i, scope := range provider.ExtraScopes { + // https://tools.ietf.org/html/rfc6749#section-3.3 (full list of allowed chars is %x21 / %x23-5B / %x5D-7E) + // for those without an ascii table, that's `!`, `#-[`, `]-~` inclusive.
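// Illustrative sketch, not part of the patch hunks: the switch in the loop below is
// equivalent to this hypothetical helper, so scopes such as "openid" or "profile"
// pass, while a scope containing a space or a double quote is rejected.
func isValidScopeChar(ch rune) bool {
	return ch == '!' || (ch >= '#' && ch <= '[') || (ch >= ']' && ch <= '~')
}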
+ for _, ch := range scope { + switch { + case ch == '!': + case ch >= '#' && ch <= '[': + case ch >= ']' && ch <= '~': + default: + allErrs = append(allErrs, field.Invalid(fieldPath.Child("extraScopes").Index(i), scope, fmt.Sprintf("cannot contain %v", ch))) + } + } + } + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go new file mode 100644 index 0000000000000..2c243bcccaa47 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go @@ -0,0 +1,125 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func openidIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeOpenID, + OpenID: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + CA: configv1.ConfigMapNameReference{Name: "oidc-ca"}, + ExtraScopes: []string{"email", "profile"}, + ExtraAuthorizeParameters: map[string]string{ + "include_granted_scopes": "true", + }, + Claims: configv1.OpenIDClaims{ + PreferredUsername: []string{"full_name", "email"}, + Email: []string{"email"}, + }, + }, + } +} + +func TestValidateOpenIDIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.OpenIDIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + Issuer: "https://bigcorp.oidc.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "missing issuer", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("issuer"), "", "must contain a scheme (e.g. 
https://)"), + field.Invalid(field.NewPath("issuer"), "", "must contain a host"), + }, + }, + { + name: "issuer with http:// scheme", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "gentleDolphin", + ClientSecret: configv1.SecretNameReference{Name: "seemsliggit"}, + Issuer: "http://oidc-friendly.domain.com", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("issuer"), "http://oidc-friendly.domain.com", "must use https scheme"), + }, + }, + { + name: "bad CA refname", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + CA: configv1.ConfigMapNameReference{Name: "the_Nameofaca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "the_Nameofaca", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "minimal working example", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: openidIDP().OpenID, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateOpenIDIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateOpenIDIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go new file mode 100644 index 0000000000000..93b7c5844cd4f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go @@ -0,0 +1,85 @@ +package oauth + +import ( + "fmt" + "net/url" + "path" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const ( + // URLToken in the query of the redirectURL gets replaced with the original request URL, escaped as a query parameter. + // Example use: https://www.example.com/login?then=${url} + urlToken = "${url}" + + // QueryToken in the query of the redirectURL gets replaced with the original request URL, unescaped. + // Example use: https://www.example.com/sso/oauth/authorize?${query} + queryToken = "${query}" +) + +func ValidateRequestHeaderIdentityProvider(provider *configv1.RequestHeaderIdentityProvider, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.ClientCA, true)...) 
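// Illustrative sketch, not part of the patch hunks: the ${url} and ${query} tokens
// are only honored in the query component of the URL, because hasParamToken (defined
// at the bottom of this file) inspects u.RawQuery alone. A minimal example:
func exampleParamToken() {
	inQuery, _ := url.Parse("https://sso.example.com/oauth/authorize?then=${url}")
	inPath, _ := url.Parse("https://sso.example.com/oauth/authorize/${query}")
	fmt.Println(hasParamToken(inQuery)) // true: token appears in the query string
	fmt.Println(hasParamToken(inPath))  // false: token sits in the path, not the query
}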
+ + if len(provider.Headers) == 0 { + errs = append(errs, field.Required(fieldPath.Child("headers"), "")) + } + + if len(provider.ChallengeURL) == 0 && len(provider.LoginURL) == 0 { + errs = append(errs, field.Required(fieldPath, "at least one of challengeURL or loginURL must be specified")) + } + + if len(provider.ChallengeURL) > 0 { + u, urlErrs := validation.ValidateURL(provider.ChallengeURL, fieldPath.Child("challengeURL")) + errs = append(errs, urlErrs...) + if len(urlErrs) == 0 { + if !hasParamToken(u) { + errs = append(errs, + field.Invalid(field.NewPath("challengeURL"), provider.ChallengeURL, + fmt.Sprintf("query does not include %q or %q, redirect will not preserve original authorize parameters", urlToken, queryToken)), + ) + } + } + } + + if len(provider.LoginURL) > 0 { + u, urlErrs := validation.ValidateURL(provider.LoginURL, fieldPath.Child("loginURL")) + errs = append(errs, urlErrs...) + if len(urlErrs) == 0 { + if !hasParamToken(u) { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, + fmt.Sprintf("query does not include %q or %q, redirect will not preserve original authorize parameters", urlToken, queryToken), + ), + ) + } + if strings.HasSuffix(u.Path, "/") { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, `path ends with "/", grant approval flows will not function correctly`), + ) + } + if _, file := path.Split(u.Path); file != "authorize" { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, `path does not end with "/authorize", grant approval flows will not function correctly`), + ) + } + } + } + + return errs +} + +func hasParamToken(u *url.URL) bool { + return strings.Contains(u.RawQuery, urlToken) || strings.Contains(u.RawQuery, queryToken) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go new file mode 100644 index 0000000000000..44e590f0b2b5e --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go @@ -0,0 +1,193 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func requestHeaderIDP(challenge, login bool) configv1.IdentityProviderConfig { + var challengeURL, loginURL string + + if challenge { + challengeURL = "https://sso.corporate.coolpeople.se/challenges/oauth/authorize?${query}" + } + if login { + loginURL = "https://sso.corporate.coolpeople.se/loginz/oauth/authorize?${query}" + } + + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeRequestHeader, + RequestHeader: &configv1.RequestHeaderIdentityProvider{ + LoginURL: loginURL, + ChallengeURL: challengeURL, + ClientCA: configv1.ConfigMapNameReference{ + Name: "coolpeople-client-ca", + }, + ClientCommonNames: []string{"authn-proxy"}, + Headers: []string{"X-Remote-User", "SSO-User"}, + NameHeaders: []string{"X-Remote-User-Display-Name"}, + }, + } +} + +func TestValidateRequestHeaderIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.RequestHeaderIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "empty provider", + args: args{ + provider: 
&configv1.RequestHeaderIdentityProvider{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Required(field.NewPath("headers"), ""), + {Type: field.ErrorTypeRequired, Field: "", BadValue: "", Detail: "at least one of challengeURL or loginURL must be specified"}, + }, + }, + { + name: "wrong ca refname", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + LoginURL: "http://oauth.coolpeoplecorp.com/login/authorize?${query}", + Headers: []string{"X-Remote-User"}, + ClientCA: configv1.ConfigMapNameReference{Name: "dat_badrefname"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "dat_badrefname", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "challenge url without query, no client CA set", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Invalid(field.NewPath("challengeURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "challenge url with query - no ${url}, ${query}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint?${sender}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("challengeURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint?${sender}", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "challenge url with query - ${url}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint?${url}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{}, + }, + { + name: "login url without query and authorize", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - no ${url}, ${query} - no client CA set", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${custom}", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${custom}", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "login url with query - ${query} - no 
/authorize", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint?${query}", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - ${query} - ends with /", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", "path ends with \"/\", grant approval flows will not function correctly"), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - ${query}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: requestHeaderIDP(true, true).RequestHeader, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateRequestHeaderIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateRequestHeaderIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource.go b/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource.go new file mode 100644 index 0000000000000..f4cb78543ccef --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource.go @@ -0,0 +1,52 @@ +package operator + +import ( + "context" + "fmt" + "io" + + "k8s.io/apiserver/pkg/admission" +) + +const PluginName = "operator.openshift.io/DenyDeleteClusterOperators" + +// Register registers an admission plugin factory whose plugin prevents the deletion of cluster operator resources. +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return newAdmissionPlugin(), nil + }) +} + +var _ admission.ValidationInterface = &admissionPlugin{} + +type admissionPlugin struct { + *admission.Handler +} + +func newAdmissionPlugin() *admissionPlugin { + return &admissionPlugin{Handler: admission.NewHandler(admission.Delete)} +} + +// Validate returns an error if there is an attempt to delete a cluster operator resource. 
+func (p *admissionPlugin) Validate(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + if len(attributes.GetSubresource()) > 0 { + return nil + } + if attributes.GetResource().Group != "operator.openshift.io" { + return nil + } + switch attributes.GetResource().Resource { + // Deletion is denied for storages.operator.openshift.io objects named cluster, + // because MCO and KCM-O depend on this resource being present in order to + // correctly set environment variables on kubelet and kube-controller-manager. + case "storages": + if attributes.GetName() != "cluster" { + return nil + } + // Deletion is allowed for all other operator.openshift.io objects unless + // explicitly listed above. + default: + return nil + } + return admission.NewForbidden(attributes, fmt.Errorf("deleting required %s.%s resource, named %s, is not allowed", attributes.GetResource().Resource, attributes.GetResource().Group, attributes.GetName())) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource_test.go new file mode 100644 index 0000000000000..6b0eaa5cc911d --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/operator/deny_delete_cluster_operator_resource_test.go @@ -0,0 +1,73 @@ +package operator + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +func TestAdmissionPlugin_Validate(t *testing.T) { + testCases := []struct { + tcName string + group string + resource string + name string + denyDelete bool + }{ + { + tcName: "NotBlackListedResourceNamedCluster", + group: "operator.openshift.io", + resource: "notBlacklisted", + name: "cluster", + denyDelete: false, + }, + { + tcName: "NotBlackListedResourceNamedNotCluster", + group: "operator.openshift.io", + resource: "notBlacklisted", + name: "notCluster", + denyDelete: false, + }, + { + tcName: "StorageResourceNamedCluster", + group: "operator.openshift.io", + resource: "storages", + name: "cluster", + denyDelete: true, + }, + { + tcName: "StorageResourceNamedNotCluster", + group: "operator.openshift.io", + resource: "storages", + name: "notCluster", + denyDelete: false, + }, + { + tcName: "ClusterVersionNotVersion", + group: "config.openshift.io", + resource: "clusterversions", + name: "instance", + denyDelete: false, + }, + { + tcName: "OtherGroup", + group: "not.operator.openshift.io", + resource: "notBlacklisted", + name: "cluster", + denyDelete: false, + }, + } + for _, tc := range testCases { + t.Run(tc.tcName, func(t *testing.T) { + err := newAdmissionPlugin().Validate(context.TODO(), admission.NewAttributesRecord( + nil, nil, schema.GroupVersionKind{}, "", + tc.name, schema.GroupVersionResource{Group: tc.group, Resource: tc.resource}, + "", admission.Delete, nil, false, nil), nil) + if tc.denyDelete != (err != nil) { + t.Error(tc.denyDelete, err) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go b/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go new file mode 100644 index 0000000000000..d0e1af58ab999 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go @@ -0,0 +1,112 @@ +package project + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + 
"k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + validationutil "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateProject" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("projects"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Project"): projectV1{}, + }) + }) +} + +func toProjectV1(uncastObj runtime.Object) (*configv1.Project, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Project) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Project"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type projectV1 struct { +} + +func validateProjectSpec(spec configv1.ProjectSpec) field.ErrorList { + allErrs := field.ErrorList{} + + if len(spec.ProjectRequestMessage) > 4096 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.projectRequestMessage"), spec, validationutil.MaxLenError(4096))) + } + + if name := spec.ProjectRequestTemplate.Name; len(name) > 0 { + for _, msg := range validation.NameIsDNSSubdomain(spec.ProjectRequestTemplate.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.projectRequestTemplate.name"), name, msg)) + } + } + + return allErrs +} + +func (projectV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toProjectV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateProjectSpec(obj.Spec)...) + + return allErrs +} + +func (projectV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toProjectV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toProjectV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateProjectSpec(obj.Spec)...) + + return allErrs +} + +func (projectV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toProjectV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toProjectV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go new file mode 100644 index 0000000000000..28d4958db9490 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go @@ -0,0 +1,84 @@ +package rolebindingrestriction + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + authorizationv1 "github.com/openshift/api/authorization/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + rbrvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation" +) + +const PluginName = "authorization.openshift.io/ValidateRoleBindingRestriction" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: authorizationv1.GroupName, Resource: "rolebindingrestrictions"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + authorizationv1.GroupVersion.WithKind("RoleBindingRestriction"): roleBindingRestrictionV1{}, + }) + }) +} + +func toRoleBindingRestriction(uncastObj runtime.Object) (*authorizationv1.RoleBindingRestriction, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*authorizationv1.RoleBindingRestriction) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"RoleBindingRestriction"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{authorizationv1.GroupVersion.String()})) + } + + return obj, nil +} + +type roleBindingRestrictionV1 struct { +} + +func (roleBindingRestrictionV1) ValidateCreate(_ context.Context, obj runtime.Object) field.ErrorList { + roleBindingRestrictionObj, errs := toRoleBindingRestriction(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&roleBindingRestrictionObj.ObjectMeta, true, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, rbrvalidation.ValidateRoleBindingRestriction(roleBindingRestrictionObj)...) + + return errs +} + +func (roleBindingRestrictionV1) ValidateUpdate(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + roleBindingRestrictionObj, errs := toRoleBindingRestriction(obj) + if len(errs) > 0 { + return errs + } + roleBindingRestrictionOldObj, errs := toRoleBindingRestriction(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&roleBindingRestrictionObj.ObjectMeta, true, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, rbrvalidation.ValidateRoleBindingRestrictionUpdate(roleBindingRestrictionObj, roleBindingRestrictionOldObj)...) 
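// Illustrative sketch, not part of the patch hunks: RoleBindingRestriction is a
// namespaced resource, so ObjectMeta validation here runs with requiresNamespace=true;
// metav1 is assumed to be imported for this example.
func exampleNamespaceRequired() field.ErrorList {
	obj := &authorizationv1.RoleBindingRestriction{
		ObjectMeta: metav1.ObjectMeta{Name: "match-users"}, // no namespace set
	}
	// Returns a Required error on metadata.namespace, plus spec errors for the empty spec.
	return roleBindingRestrictionV1{}.ValidateCreate(context.TODO(), obj)
}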
+ + return errs +} + +func (r roleBindingRestrictionV1) ValidateStatusUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return r.ValidateUpdate(ctx, obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go new file mode 100644 index 0000000000000..e93824220d7fb --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go @@ -0,0 +1,115 @@ +package validation + +import ( + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core/validation" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +func ValidateRoleBindingRestriction(rbr *authorizationv1.RoleBindingRestriction) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&rbr.ObjectMeta, true, + apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata")) + + allErrs = append(allErrs, + ValidateRoleBindingRestrictionSpec(&rbr.Spec, field.NewPath("spec"))...) + + return allErrs +} + +func ValidateRoleBindingRestrictionUpdate(rbr, old *authorizationv1.RoleBindingRestriction) field.ErrorList { + allErrs := ValidateRoleBindingRestriction(rbr) + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&rbr.ObjectMeta, + &old.ObjectMeta, field.NewPath("metadata"))...) + + return allErrs +} + +func ValidateRoleBindingRestrictionSpec(spec *authorizationv1.RoleBindingRestrictionSpec, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify exactly one of userrestriction, grouprestriction, or serviceaccountrestriction` + + if spec.UserRestriction != nil { + if spec.GroupRestriction != nil { + allErrs = append(allErrs, field.Invalid(fld.Child("grouprestriction"), + "both userrestriction and grouprestriction specified", invalidMsg)) + } + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, + field.Invalid(fld.Child("serviceaccountrestriction"), + "both userrestriction and serviceaccountrestriction specified", invalidMsg)) + } + } else if spec.GroupRestriction != nil { + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, + field.Invalid(fld.Child("serviceaccountrestriction"), + "both grouprestriction and serviceaccountrestriction specified", invalidMsg)) + } + } else if spec.ServiceAccountRestriction == nil { + allErrs = append(allErrs, field.Required(fld.Child("userrestriction"), + invalidMsg)) + } + + if spec.UserRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionUser(spec.UserRestriction, fld.Child("userrestriction"))...) + } + if spec.GroupRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionGroup(spec.GroupRestriction, fld.Child("grouprestriction"))...) + } + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionServiceAccount(spec.ServiceAccountRestriction, fld.Child("serviceaccountrestriction"))...) 
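// Illustrative sketch, not part of the patch hunks: specifying more than one
// restriction type is rejected, for example:
func exampleExactlyOneRestriction() field.ErrorList {
	spec := &authorizationv1.RoleBindingRestrictionSpec{
		UserRestriction:  &authorizationv1.UserRestriction{Users: []string{"alice"}},
		GroupRestriction: &authorizationv1.GroupRestriction{Groups: []string{"admins"}},
	}
	// Yields an Invalid error on spec.grouprestriction, because exactly one of the
	// three restriction types may be set.
	return ValidateRoleBindingRestrictionSpec(spec, field.NewPath("spec"))
}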
+ } + + return allErrs +} + +func ValidateRoleBindingRestrictionUser(user *authorizationv1.UserRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one user, group, or label selector` + + if !(len(user.Users) > 0 || len(user.Groups) > 0 || len(user.Selectors) > 0) { + allErrs = append(allErrs, field.Required(fld.Child("users"), invalidMsg)) + } + + for i, selector := range user.Selectors { + allErrs = append(allErrs, + unversionedvalidation.ValidateLabelSelector(&selector, + unversionedvalidation.LabelSelectorValidationOptions{}, + fld.Child("selector").Index(i))...) + } + + return allErrs +} + +func ValidateRoleBindingRestrictionGroup(group *authorizationv1.GroupRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one group or label selector` + + if !(len(group.Groups) > 0 || len(group.Selectors) > 0) { + allErrs = append(allErrs, field.Required(fld.Child("groups"), invalidMsg)) + } + + for i, selector := range group.Selectors { + allErrs = append(allErrs, + unversionedvalidation.ValidateLabelSelector(&selector, + unversionedvalidation.LabelSelectorValidationOptions{}, + fld.Child("selector").Index(i))...) + } + + return allErrs +} + +func ValidateRoleBindingRestrictionServiceAccount(sa *authorizationv1.ServiceAccountRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one service account or namespace` + + if !(len(sa.ServiceAccounts) > 0 || len(sa.Namespaces) > 0) { + allErrs = append(allErrs, + field.Required(fld.Child("serviceaccounts"), invalidMsg)) + } + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/default_route.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/default_route.go new file mode 100644 index 0000000000000..74608f2cf2eb9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/default_route.go @@ -0,0 +1,65 @@ +package route + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + + v1 "github.com/openshift/api/route/v1" +) + +const ( + DefaultingPluginName = "route.openshift.io/DefaultRoute" +) + +func RegisterDefaulting(plugins *admission.Plugins) { + plugins.Register(DefaultingPluginName, func(_ io.Reader) (admission.Interface, error) { + return &defaultRoute{ + Handler: admission.NewHandler(admission.Create, admission.Update), + }, nil + }) +} + +type defaultRoute struct { + *admission.Handler +} + +var _ admission.MutationInterface = &defaultRoute{} + +func (a *defaultRoute) Admit(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + if attributes.GetResource().GroupResource() != (schema.GroupResource{Group: "route.openshift.io", Resource: "routes"}) { + return nil + } + + if len(attributes.GetSubresource()) > 0 { + return nil + } + + u, ok := attributes.GetObject().(runtime.Unstructured) + if !ok { + // If a request to the resource routes.route.openshift.io is subject to + // kube-apiserver admission, that should imply that the route API is being served as + // CRs and the request body should have been unmarshaled into an unstructured + // object. 
+ return fmt.Errorf("object being admitted is of type %T and does not implement runtime.Unstructured", attributes.GetObject()) + } + + var external v1.Route + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), &external); err != nil { + return err + } + + SetObjectDefaults_Route(&external) + + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&external) + if err != nil { + return err + } + u.SetUnstructuredContent(content) + + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters.go new file mode 100644 index 0000000000000..c174dbcdfca50 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters.go @@ -0,0 +1,28 @@ +package route + +import ( + routev1 "github.com/openshift/api/route/v1" + "github.com/openshift/library-go/pkg/route/defaulting" +) + +// Defaulters defined in github.com/openshift/library-go/pkg/route/defaulting are not recognized by +// codegen (make update). This file MUST contain duplicates of each defaulter function defined in +// library-go, with the body of each function defined here delegating to its library-go +// counterpart. Missing or extra defaulters here will introduce differences between Route as a CRD +// (MicroShift) and Route as an aggregated API of openshift-apiserver. + +func SetDefaults_RouteSpec(obj *routev1.RouteSpec) { + defaulting.SetDefaults_RouteSpec(obj) +} + +func SetDefaults_RouteTargetReference(obj *routev1.RouteTargetReference) { + defaulting.SetDefaults_RouteTargetReference(obj) +} + +func SetDefaults_TLSConfig(obj *routev1.TLSConfig) { + defaulting.SetDefaults_TLSConfig(obj) +} + +func SetDefaults_RouteIngress(obj *routev1.RouteIngress) { + defaulting.SetDefaults_RouteIngress(obj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters_test.go new file mode 100644 index 0000000000000..eff11a27765b7 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/defaulters_test.go @@ -0,0 +1,66 @@ +package route + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/fs" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/util/sets" +) + +func TestDuplicatedDefaulters(t *testing.T) { + expected, err := findDefaultersInPackage("../../../../vendor/github.com/openshift/library-go/pkg/route/defaulting") + if err != nil { + t.Fatalf("error finding expected manual defaulters: %v", err) + } + + actual, err := findDefaultersInPackage(".") + if err != nil { + t.Fatalf("error finding actual manual defaulters: %v", err) + } + + for _, missing := range expected.Difference(actual).List() { + t.Errorf("missing local duplicate of library-go defaulter %q", missing) + } + + for _, extra := range actual.Difference(expected).List() { + t.Errorf("found local defaulter %q without library-go counterpart", extra) + } +} + +// findDefaultersInPackage parses the source of the Go package at the given path and returns the +// names of all manual defaulter functions it declares. Package function declarations can't be +// enumerated using reflection.
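// Illustrative sketch, not part of the patch hunks: the same helper can be used to
// list the manual defaulters this package declares, for example:
func exampleListLocalDefaulters() ([]string, error) {
	defaulters, err := findDefaultersInPackage(".")
	if err != nil {
		return nil, err
	}
	// Sorted function names, e.g. SetDefaults_RouteIngress, SetDefaults_RouteSpec, ...
	return defaulters.List(), nil
}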
+func findDefaultersInPackage(path string) (sets.String, error) { + pkgs, err := parser.ParseDir(token.NewFileSet(), path, func(fi fs.FileInfo) bool { + return !strings.HasSuffix(fi.Name(), "_test.go") + }, 0) + if err != nil { + return nil, fmt.Errorf("failed to parse source of package at %q: %v", path, err) + } + if len(pkgs) != 1 { + return nil, fmt.Errorf("expected exactly 1 package for all sources in %q, got %d", path, len(pkgs)) + } + + defaulters := sets.NewString() + for _, pkg := range pkgs { + ast.Inspect(pkg, func(node ast.Node) bool { + switch typed := node.(type) { + case *ast.Package, *ast.File: + return true + case *ast.FuncDecl: + if typed.Recv == nil && strings.HasPrefix(typed.Name.Name, "SetDefaults_") { + defaulters.Insert(typed.Name.Name) + } + return false + default: + return false + } + }) + } + return defaulters, nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/doc.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/doc.go new file mode 100644 index 0000000000000..86f4e3954c020 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/doc.go @@ -0,0 +1,4 @@ +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=github.com/openshift/api/route/v1 + +package route diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route.go new file mode 100644 index 0000000000000..ba8f004fb30d6 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route.go @@ -0,0 +1,83 @@ +package route + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + routev1 "github.com/openshift/api/route/v1" + routevalidation "github.com/openshift/library-go/pkg/route/validation" +) + +const PluginName = "route.openshift.io/ValidateRoute" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return NewValidateRoute() + }) +} + +func toRoute(uncastObj runtime.Object) (*routev1.Route, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*routev1.Route) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Route"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{routev1.GroupVersion.String()}), + } + } + + return obj, nil +} + +type routeV1 struct { + secretsGetter func() corev1client.SecretsGetter + sarGetter func() authorizationv1client.SubjectAccessReviewsGetter + routeValidationOptsGetter func() RouteValidationOptionGetter +} + +func (r routeV1) ValidateCreate(ctx context.Context, obj runtime.Object) field.ErrorList { + routeObj, errs := toRoute(obj) + if len(errs) > 0 { + return errs + } + + return routevalidation.ValidateRoute(ctx, routeObj, r.sarGetter().SubjectAccessReviews(), r.secretsGetter(), r.routeValidationOptsGetter().GetValidationOptions()) +} + +func (r routeV1) ValidateUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + routeObj, errs := toRoute(obj) + if len(errs) > 0 { + return errs + } + + routeOldObj, errs := toRoute(oldObj) + if 
len(errs) > 0 { + return errs + } + + return routevalidation.ValidateRouteUpdate(ctx, routeObj, routeOldObj, r.sarGetter().SubjectAccessReviews(), r.secretsGetter(), r.routeValidationOptsGetter().GetValidationOptions()) +} + +func (routeV1) ValidateStatusUpdate(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + routeObj, errs := toRoute(obj) + if len(errs) > 0 { + return errs + } + + routeOldObj, errs := toRoute(oldObj) + if len(errs) > 0 { + return errs + } + + return routevalidation.ValidateRouteStatusUpdate(routeObj, routeOldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route_test.go new file mode 100644 index 0000000000000..7b57a56be3ee1 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/validate_route_test.go @@ -0,0 +1,149 @@ +package route + +import ( + "context" + "testing" + + routev1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" +) + +// setupWithFakeClient setter is only available in unit-tests +func (a *validateCustomResourceWithClient) setupWithFakeClient() { + c := fake.NewSimpleClientset() + a.secretsGetter = c.CoreV1() + a.sarGetter = c.AuthorizationV1() + a.routeValidationOptsGetter = NewRouteValidationOpts() +} + +// TestValidateRoutePlugin verifies if the route validation plugin can handle admits +// for the resource {group: api/route/v1, kind: Route} +// will check if validator client is +// conformant with admission.InitializationValidator interface +func TestValidateRoutePlugin(t *testing.T) { + plugin, err := NewValidateRoute() + if err != nil { + t.Fatal(err) + } + + validator, ok := plugin.(*validateCustomResourceWithClient) + if !ok { + t.Fatal("could not type cast returned value of NewValidateRoute() into type validateCustomResourceWithClient, " + + "perhaps you changed the type in the implementation but not in the tests!") + } + + // unit test specific logic as a replacement for routeAdmitter.SetRESTClientConfig(...) 
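// Illustrative sketch, not part of the patch hunks: outside of tests the plugin
// receives its clients through the WantsRESTClientConfig hook (see
// validation_wrapper.go); a hypothetical wiring helper, assuming a
// "k8s.io/client-go/rest" import, could look like:
func exampleWireRouteValidator(cfg rest.Config) (admission.Interface, error) {
	plugin, err := NewValidateRoute()
	if err != nil {
		return nil, err
	}
	v := plugin.(*validateCustomResourceWithClient)
	v.SetRESTClientConfig(cfg)
	return v, v.ValidateInitialization()
}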
+ validator.setupWithFakeClient() + + // admission.InitializationValidator -> ValidateInitialization() + err = validator.ValidateInitialization() + if err != nil { + t.Fatal(err) + } + + r1 := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + Spec: routev1.RouteSpec{ + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: "default", + }, + }, + } + r2 := r1.DeepCopy() + + s1 := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + Data: map[string][]byte{}, + } + s2 := s1.DeepCopy() + + testCases := []struct { + description string + + object runtime.Object + oldObject runtime.Object + + kind schema.GroupVersionKind + resource schema.GroupVersionResource + + name string + namespace string + + expectedError bool + }{ + { + description: "route object is passed to admission plugin with scheme routev1.Route", + + object: runtime.Object(r1), + oldObject: runtime.Object(r2), + + kind: routev1.GroupVersion.WithKind("Route"), + resource: routev1.GroupVersion.WithResource("routes"), + + name: r1.Name, + namespace: r1.Namespace, + + expectedError: false, + }, + { + description: "non-route object is passed to admission plugin with scheme corev1.Secret", + + object: runtime.Object(s1), + oldObject: runtime.Object(s2), + + kind: corev1.SchemeGroupVersion.WithKind("Secret"), + resource: corev1.SchemeGroupVersion.WithResource("secrets"), + + name: s1.Name, + namespace: s1.Namespace, + + expectedError: false, + }, + { + description: "non-route object is passed to admission plugin with conflicting scheme routev1.Route", + + object: runtime.Object(s1), + oldObject: runtime.Object(s2), + + kind: routev1.GroupVersion.WithKind("Route"), + resource: routev1.GroupVersion.WithResource("routes"), + + name: s1.Name, + namespace: s1.Namespace, + + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + + attr := admission.NewAttributesRecord( + tc.object, tc.oldObject, + tc.kind, tc.name, tc.namespace, tc.resource, + "", admission.Create, nil, false, + &user.DefaultInfo{}, + ) + + switch err := validator.Validate(context.Background(), attr, nil); { + case !tc.expectedError && err != nil: + t.Fatalf("admission error not expected, but found %q", err) + case tc.expectedError && err == nil: + t.Fatal("admission error expected, but got nil") + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_opts.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_opts.go new file mode 100644 index 0000000000000..3bbe5c30ad655 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_opts.go @@ -0,0 +1,31 @@ +package route + +import ( + "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" + + openshiftfeatures "github.com/openshift/api/features" + routecommon "github.com/openshift/library-go/pkg/route" +) + +type RouteValidationOptionGetter interface { + GetValidationOptions() routecommon.RouteValidationOptions +} + +type RouteValidationOpts struct { + opts routecommon.RouteValidationOptions +} + +var _ RouteValidationOptionGetter = &RouteValidationOpts{} + +func NewRouteValidationOpts() *RouteValidationOpts { + return &RouteValidationOpts{ + opts: routecommon.RouteValidationOptions{ + AllowExternalCertificates: feature.DefaultMutableFeatureGate.Enabled(featuregate.Feature(openshiftfeatures.FeatureGateRouteExternalCertificate)), + }, + } +} + +func (o 
*RouteValidationOpts) GetValidationOptions() routecommon.RouteValidationOptions { + return o.opts +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_wrapper.go b/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_wrapper.go new file mode 100644 index 0000000000000..2f0b733353543 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/route/validation_wrapper.go @@ -0,0 +1,92 @@ +package route + +import ( + "fmt" + + routev1 "github.com/openshift/api/route/v1" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/rest" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +type validateCustomResourceWithClient struct { + admission.ValidationInterface + + secretsGetter corev1client.SecretsGetter + sarGetter authorizationv1client.SubjectAccessReviewsGetter + routeValidationOptsGetter RouteValidationOptionGetter +} + +func NewValidateRoute() (admission.Interface, error) { + ret := &validateCustomResourceWithClient{} + + delegate, err := customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + routev1.GroupVersion.WithResource("routes").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + routev1.GroupVersion.WithKind("Route"): routeV1{ + secretsGetter: ret.getSecretsGetter, + sarGetter: ret.getSubjectAccessReviewsGetter, + routeValidationOptsGetter: ret.getRouteValidationOptions, + }, + }) + if err != nil { + return nil, err + } + ret.ValidationInterface = delegate + + return ret, nil +} + +var _ admissionrestconfig.WantsRESTClientConfig = &validateCustomResourceWithClient{} + +func (a *validateCustomResourceWithClient) getSecretsGetter() corev1client.SecretsGetter { + return a.secretsGetter +} + +func (a *validateCustomResourceWithClient) getSubjectAccessReviewsGetter() authorizationv1client.SubjectAccessReviewsGetter { + return a.sarGetter +} + +func (a *validateCustomResourceWithClient) getRouteValidationOptions() RouteValidationOptionGetter { + return a.routeValidationOptsGetter +} + +func (a *validateCustomResourceWithClient) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + + a.secretsGetter, err = corev1client.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } + + a.sarGetter, err = authorizationv1client.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } + + a.routeValidationOptsGetter = NewRouteValidationOpts() +} + +func (a *validateCustomResourceWithClient) ValidateInitialization() error { + if a.secretsGetter == nil { + return fmt.Errorf("%s needs a secretsGetter", PluginName) + } + if a.sarGetter == nil { + return fmt.Errorf("%s needs a subjectAccessReviewsGetter", PluginName) + } + if a.routeValidationOptsGetter == nil { + return fmt.Errorf("%s needs a routeValidationOptsGetter", PluginName) + } + + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go b/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go new file mode 100644 index 0000000000000..dddf0c70209d2 --- /dev/null +++ 
b/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go @@ -0,0 +1,107 @@ +package scheduler + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateScheduler" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("schedulers"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Scheduler"): schedulerV1{}, + }) + }) +} + +func toSchedulerV1(uncastObj runtime.Object) (*configv1.Scheduler, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Scheduler) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Scheduler"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type schedulerV1 struct { +} + +func validateSchedulerSpec(spec configv1.SchedulerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + if name := spec.Policy.Name; len(name) > 0 { + for _, msg := range validation.NameIsDNSSubdomain(spec.Policy.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.Policy.name"), name, msg)) + } + } + + return allErrs +} + +func (schedulerV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toSchedulerV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateSchedulerSpec(obj.Spec)...) + + return allErrs +} + +func (schedulerV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toSchedulerV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toSchedulerV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateSchedulerSpec(obj.Spec)...) + + return allErrs +} + +func (schedulerV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toSchedulerV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toSchedulerV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
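// Illustrative sketch, not part of the patch hunks: like the other config.openshift.io
// singletons validated in this package tree, a Scheduler object must be named
// "cluster"; metav1 is assumed to be imported for this example.
func exampleSchedulerName() field.ErrorList {
	obj := &configv1.Scheduler{ObjectMeta: metav1.ObjectMeta{Name: "not-cluster"}}
	// RequireNameCluster rejects any other name, so this returns an Invalid error on metadata.name.
	return schedulerV1{}.ValidateCreate(context.TODO(), obj)
}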
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go new file mode 100644 index 0000000000000..1a7193eff7c75 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go @@ -0,0 +1,93 @@ +package securitycontextconstraints + +import ( + "bytes" + "context" + "io" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + + securityv1 "github.com/openshift/api/security/v1" +) + +const DefaultingPluginName = "security.openshift.io/DefaultSecurityContextConstraints" + +func RegisterDefaulting(plugins *admission.Plugins) { + plugins.Register(DefaultingPluginName, func(config io.Reader) (admission.Interface, error) { + return NewDefaulter(), nil + }) +} + +type defaultSCC struct { + *admission.Handler + + scheme *runtime.Scheme + codecFactory runtimeserializer.CodecFactory +} + +var _ admission.MutationInterface = &defaultSCC{} + +func NewDefaulter() admission.Interface { + scheme := runtime.NewScheme() + codecFactory := runtimeserializer.NewCodecFactory(scheme) + utilruntime.Must(securityv1.Install(scheme)) + + return &defaultSCC{ + Handler: admission.NewHandler(admission.Create, admission.Update), + scheme: scheme, + codecFactory: codecFactory, + } +} + +// Admit defaults an SCC by going unstructured > external > internal > external > unstructured +func (a *defaultSCC) Admit(ctx context.Context, attributes admission.Attributes, o admission.ObjectInterfaces) error { + if a.shouldIgnore(attributes) { + return nil + } + + unstructuredOrig, ok := attributes.GetObject().(*unstructured.Unstructured) + if !ok { + return nil + } + buf := &bytes.Buffer{} + if err := unstructured.UnstructuredJSONScheme.Encode(unstructuredOrig, buf); err != nil { + return err + } + + uncastObj, err := runtime.Decode(a.codecFactory.UniversalDeserializer(), buf.Bytes()) + if err != nil { + return err + } + + outSCCExternal := uncastObj.(*securityv1.SecurityContextConstraints) + SetDefaults_SCC(outSCCExternal) + defaultedBytes, err := runtime.Encode(a.codecFactory.LegacyCodec(securityv1.GroupVersion), outSCCExternal) + if err != nil { + return err + } + outUnstructured := &unstructured.Unstructured{} + if _, _, err := unstructured.UnstructuredJSONScheme.Decode(defaultedBytes, nil, outUnstructured); err != nil { + return err + } + + unstructuredOrig.Object = outUnstructured.Object + + return nil +} + +func (a *defaultSCC) shouldIgnore(attributes admission.Attributes) bool { + if attributes.GetResource().GroupResource() != (schema.GroupResource{Group: "security.openshift.io", Resource: "securitycontextconstraints"}) { + return true + } + // if a subresource is specified, skip it + if len(attributes.GetSubresource()) > 0 { + return true + } + + return false +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go new file mode 100644 index 0000000000000..16c6d56af2e2f --- /dev/null +++ 
b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go @@ -0,0 +1,274 @@ +package securitycontextconstraints + +import ( + "bytes" + "context" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apiserver/pkg/admission" + + securityv1 "github.com/openshift/api/security/v1" + sccutil "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" +) + +func TestDefaultingHappens(t *testing.T) { + inputSCC := `{ + "allowHostDirVolumePlugin": true, + "allowHostNetwork": true, + "allowHostPID": true, + "allowHostPorts": true, + "apiVersion": "security.openshift.io/v1", + "kind": "SecurityContextConstraints", + "metadata": { + "annotations": { + "kubernetes.io/description": "node-exporter scc is used for the Prometheus node exporter" + }, + "name": "node-exporter" + }, + "readOnlyRootFilesystem": false, + "runAsUser": { + "type": "RunAsAny" + }, + "seLinuxContext": { + "type": "RunAsAny" + }, + "users": [] +}` + + inputUnstructured := &unstructured.Unstructured{} + _, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(inputSCC), nil, inputUnstructured) + if err != nil { + t.Fatal(err) + } + + attributes := admission.NewAttributesRecord(inputUnstructured, nil, schema.GroupVersionKind{}, "", "", schema.GroupVersionResource{Group: "security.openshift.io", Resource: "securitycontextconstraints"}, "", admission.Create, nil, false, nil) + defaulter := NewDefaulter() + if err := defaulter.(*defaultSCC).Admit(context.TODO(), attributes, nil); err != nil { + t.Fatal(err) + } + + buf := &bytes.Buffer{} + if err := unstructured.UnstructuredJSONScheme.Encode(inputUnstructured, buf); err != nil { + t.Fatal(err) + } + + expectedSCC := `{ + "allowHostDirVolumePlugin": true, + "allowHostIPC": false, + "allowHostNetwork": true, + "allowHostPID": true, + "allowHostPorts": true, + "allowPrivilegeEscalation": true, + "allowPrivilegedContainer": false, + "allowedCapabilities": null, + "apiVersion": "security.openshift.io/v1", + "defaultAddCapabilities": null, + "fsGroup": { + "type": "RunAsAny" + }, + "groups": [], + "kind": "SecurityContextConstraints", + "metadata": { + "annotations": { + "kubernetes.io/description": "node-exporter scc is used for the Prometheus node exporter" + }, + "name": "node-exporter", + "creationTimestamp":null + }, + "priority": null, + "readOnlyRootFilesystem": false, + "requiredDropCapabilities": null, + "runAsUser": { + "type": "RunAsAny" + }, + "seLinuxContext": { + "type": "RunAsAny" + }, + "supplementalGroups": { + "type": "RunAsAny" + }, + "users": [], + "volumes": [ + "*" + ] +}` + expectedUnstructured := &unstructured.Unstructured{} + if _, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(expectedSCC), nil, expectedUnstructured); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(expectedUnstructured.Object, inputUnstructured.Object) { + t.Fatal(diff.ObjectDiff(expectedUnstructured.Object, inputUnstructured.Object)) + } +} + +func TestDefaultSecurityContextConstraints(t *testing.T) { + tests := map[string]struct { + scc *securityv1.SecurityContextConstraints + expectedFSGroup securityv1.FSGroupStrategyType + expectedSupGroup securityv1.SupplementalGroupsStrategyType + }{ + "shouldn't default": { + scc: &securityv1.SecurityContextConstraints{ + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + SupplementalGroups: 
securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default fsgroup runAsAny": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyRunAsAny, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default sup group runAsAny": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: securityv1.SupplementalGroupsStrategyRunAsAny, + }, + "default fsgroup runAsAny with mustRunAs UID strategy": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyMustRunAsRange, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyRunAsAny, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default sup group runAsAny with mustRunAs UID strategy": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyMustRunAsRange, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: securityv1.SupplementalGroupsStrategyRunAsAny, + }, + } + for k, v := range tests { + SetDefaults_SCC(v.scc) + if v.scc.FSGroup.Type != v.expectedFSGroup { + t.Errorf("%s has invalid fsgroup. Expected: %v got: %v", k, v.expectedFSGroup, v.scc.FSGroup.Type) + } + if v.scc.SupplementalGroups.Type != v.expectedSupGroup { + t.Errorf("%s has invalid supplemental group. 
Expected: %v got: %v", k, v.expectedSupGroup, v.scc.SupplementalGroups.Type) + } + } +} + +func TestDefaultSCCVolumes(t *testing.T) { + tests := map[string]struct { + scc *securityv1.SecurityContextConstraints + expectedVolumes []securityv1.FSType + expectedHostDir bool + }{ + // this expects the volumes to default to all for an empty volume slice + // but since the host dir setting is false it should be all - host dir + "old client - default allow* fields, no volumes slice": { + scc: &securityv1.SecurityContextConstraints{}, + expectedVolumes: StringSetToFSType(sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath))), + expectedHostDir: false, + }, + // this expects the volumes to default to all for an empty volume slice + "old client - set allowHostDir true fields, no volumes slice": { + scc: &securityv1.SecurityContextConstraints{ + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeAll}, + expectedHostDir: true, + }, + "new client - allow* fields set with matching volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + expectedHostDir: true, + }, + "new client - allow* fields set with mismatch host dir volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir}, + expectedHostDir: false, + }, + "new client - allow* fields set with mismatch FSTypeAll volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeAll}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: StringSetToFSType(sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath))), + expectedHostDir: false, + }, + "new client - allow* fields unset with volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir}, + expectedHostDir: false, + }, + "new client - extra volume params retained": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath, securityv1.FSTypeGitRepo}, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeGitRepo}, + expectedHostDir: false, + }, + "new client - empty volume slice, host dir true": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{}, + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeHostPath}, + expectedHostDir: true, + }, + "new client - empty volume slice, host dir false": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeNone}, + expectedHostDir: false, + }, + } + for k, v := range tests { + SetDefaults_SCC(v.scc) + + if !reflect.DeepEqual(v.scc.Volumes, v.expectedVolumes) { + t.Errorf("%s has invalid volumes. Expected: %v got: %v", k, v.expectedVolumes, v.scc.Volumes) + } + + if v.scc.AllowHostDirVolumePlugin != v.expectedHostDir { + t.Errorf("%s has invalid host dir. 
Expected: %v got: %v", k, v.expectedHostDir, v.scc.AllowHostDirVolumePlugin) + } + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go new file mode 100644 index 0000000000000..e6e4b5ff44fc7 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go @@ -0,0 +1,100 @@ +package securitycontextconstraints + +import ( + "k8s.io/apimachinery/pkg/util/sets" + + securityv1 "github.com/openshift/api/security/v1" + sccutil "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" +) + +// Default SCCs for new fields. FSGroup and SupplementalGroups are +// set to the RunAsAny strategy if they are unset on the scc. +func SetDefaults_SCC(scc *securityv1.SecurityContextConstraints) { + if len(scc.FSGroup.Type) == 0 { + scc.FSGroup.Type = securityv1.FSGroupStrategyRunAsAny + } + if len(scc.SupplementalGroups.Type) == 0 { + scc.SupplementalGroups.Type = securityv1.SupplementalGroupsStrategyRunAsAny + } + + if scc.Users == nil { + scc.Users = []string{} + } + if scc.Groups == nil { + scc.Groups = []string{} + } + + var defaultAllowedVolumes sets.String + switch { + case scc.Volumes == nil: + // assume a nil volume slice is allowing everything for backwards compatibility + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeAll)) + + case len(scc.Volumes) == 0 && scc.AllowHostDirVolumePlugin: + // an empty volume slice means "allow no volumes", but the boolean fields will always take precedence. + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeHostPath)) + + case len(scc.Volumes) == 0 && !scc.AllowHostDirVolumePlugin: + // an empty volume slice means "allow no volumes", but cannot be persisted in protobuf. + // convert this to volumes:["none"] + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeNone)) + + default: + // defaults the volume slice of the SCC. + // In order to support old clients the boolean fields will always take precedence. + defaultAllowedVolumes = fsTypeToStringSet(scc.Volumes) + } + + if scc.AllowHostDirVolumePlugin { + // if already allowing all then there is no reason to add + if !defaultAllowedVolumes.Has(string(securityv1.FSTypeAll)) { + defaultAllowedVolumes.Insert(string(securityv1.FSTypeHostPath)) + } + } else { + // we should only default all volumes if the SCC came in with FSTypeAll or we defaulted it + // otherwise we should only change the volumes slice to ensure that it does not conflict with + // the AllowHostDirVolumePlugin setting + shouldDefaultAllVolumes := defaultAllowedVolumes.Has(string(securityv1.FSTypeAll)) + + // remove anything from volumes that conflicts with AllowHostDirVolumePlugin = false + defaultAllowedVolumes.Delete(string(securityv1.FSTypeAll)) + defaultAllowedVolumes.Delete(string(securityv1.FSTypeHostPath)) + + if shouldDefaultAllVolumes { + allVolumes := sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath)) + defaultAllowedVolumes.Insert(allVolumes.List()...) + } + } + + scc.Volumes = StringSetToFSType(defaultAllowedVolumes) + + // Constraints that do not include this field must remain as permissive as + // they were prior to the introduction of this field. 
+ if scc.AllowPrivilegeEscalation == nil { + t := true + scc.AllowPrivilegeEscalation = &t + } + +} + +func StringSetToFSType(set sets.String) []securityv1.FSType { + if set == nil { + return nil + } + volumes := []securityv1.FSType{} + for _, v := range set.List() { + volumes = append(volumes, securityv1.FSType(v)) + } + return volumes +} + +func fsTypeToStringSet(volumes []securityv1.FSType) sets.String { + if volumes == nil { + return nil + } + set := sets.NewString() + for _, v := range volumes { + set.Insert(string(v)) + } + return set +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go new file mode 100644 index 0000000000000..7928686b7ac13 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go @@ -0,0 +1,80 @@ +package securitycontextconstraints + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + securityv1 "github.com/openshift/api/security/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + sccvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation" +) + +const PluginName = "security.openshift.io/ValidateSecurityContextConstraints" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: securityv1.GroupName, Resource: "securitycontextconstraints"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + securityv1.GroupVersion.WithKind("SecurityContextConstraints"): securityContextConstraintsV1{}, + }) + }) +} + +func toSecurityContextConstraints(uncastObj runtime.Object) (*securityv1.SecurityContextConstraints, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*securityv1.SecurityContextConstraints) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"SecurityContextConstraints"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{securityv1.GroupVersion.String()}), + } + } + + return obj, nil +} + +type securityContextConstraintsV1 struct { +} + +func (securityContextConstraintsV1) ValidateCreate(_ context.Context, obj runtime.Object) field.ErrorList { + securityContextConstraintsObj, errs := toSecurityContextConstraints(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, sccvalidation.ValidateSecurityContextConstraints(securityContextConstraintsObj)...) + + return errs +} + +func (securityContextConstraintsV1) ValidateUpdate(_ context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + securityContextConstraintsObj, errs := toSecurityContextConstraints(obj) + if len(errs) > 0 { + return errs + } + securityContextConstraintsOldObj, errs := toSecurityContextConstraints(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, sccvalidation.ValidateSecurityContextConstraintsUpdate(securityContextConstraintsObj, securityContextConstraintsOldObj)...) 
+ + return errs +} + +func (c securityContextConstraintsV1) ValidateStatusUpdate(ctx context.Context, obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return c.ValidateUpdate(ctx, obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go new file mode 100644 index 0000000000000..493339867b8c5 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go @@ -0,0 +1,275 @@ +package validation + +import ( + "fmt" + "regexp" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/validation" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + kapivalidation "k8s.io/kubernetes/pkg/apis/core/validation" + + securityv1 "github.com/openshift/api/security/v1" +) + +// ValidateSecurityContextConstraintsName can be used to check whether the given +// security context constraint name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateSecurityContextConstraintsName = apimachineryvalidation.NameIsDNSSubdomain + +func ValidateSecurityContextConstraints(scc *securityv1.SecurityContextConstraints) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&scc.ObjectMeta, false, ValidateSecurityContextConstraintsName, field.NewPath("metadata")) + + if scc.Priority != nil { + if *scc.Priority < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("priority"), *scc.Priority, "priority cannot be negative")) + } + } + + // ensure the user strategy has a valid type + runAsUserPath := field.NewPath("runAsUser") + switch scc.RunAsUser.Type { + case securityv1.RunAsUserStrategyMustRunAs, securityv1.RunAsUserStrategyMustRunAsNonRoot, securityv1.RunAsUserStrategyRunAsAny, securityv1.RunAsUserStrategyMustRunAsRange: + //good types + default: + msg := fmt.Sprintf("invalid strategy type. Valid values are %s, %s, %s, %s", securityv1.RunAsUserStrategyMustRunAs, securityv1.RunAsUserStrategyMustRunAsNonRoot, securityv1.RunAsUserStrategyMustRunAsRange, securityv1.RunAsUserStrategyRunAsAny) + allErrs = append(allErrs, field.Invalid(runAsUserPath.Child("type"), scc.RunAsUser.Type, msg)) + } + + // if specified, uid cannot be negative + if scc.RunAsUser.UID != nil { + if *scc.RunAsUser.UID < 0 { + allErrs = append(allErrs, field.Invalid(runAsUserPath.Child("uid"), *scc.RunAsUser.UID, "uid cannot be negative")) + } + } + + // ensure the selinux strategy has a valid type + seLinuxContextPath := field.NewPath("seLinuxContext") + switch scc.SELinuxContext.Type { + case securityv1.SELinuxStrategyMustRunAs, securityv1.SELinuxStrategyRunAsAny: + //good types + default: + msg := fmt.Sprintf("invalid strategy type. 
Valid values are %s, %s", securityv1.SELinuxStrategyMustRunAs, securityv1.SELinuxStrategyRunAsAny) + allErrs = append(allErrs, field.Invalid(seLinuxContextPath.Child("type"), scc.SELinuxContext.Type, msg)) + } + + // ensure the fsgroup strategy has a valid type + if scc.FSGroup.Type != securityv1.FSGroupStrategyMustRunAs && scc.FSGroup.Type != securityv1.FSGroupStrategyRunAsAny { + allErrs = append(allErrs, field.NotSupported(field.NewPath("fsGroup", "type"), scc.FSGroup.Type, + []string{string(securityv1.FSGroupStrategyMustRunAs), string(securityv1.FSGroupStrategyRunAsAny)})) + } + allErrs = append(allErrs, validateIDRanges(scc.FSGroup.Ranges, field.NewPath("fsGroup"))...) + + if scc.SupplementalGroups.Type != securityv1.SupplementalGroupsStrategyMustRunAs && + scc.SupplementalGroups.Type != securityv1.SupplementalGroupsStrategyRunAsAny { + allErrs = append(allErrs, field.NotSupported(field.NewPath("supplementalGroups", "type"), scc.SupplementalGroups.Type, + []string{string(securityv1.SupplementalGroupsStrategyMustRunAs), string(securityv1.SupplementalGroupsStrategyRunAsAny)})) + } + allErrs = append(allErrs, validateIDRanges(scc.SupplementalGroups.Ranges, field.NewPath("supplementalGroups"))...) + + // validate capabilities + allErrs = append(allErrs, validateSCCCapsAgainstDrops(scc.RequiredDropCapabilities, scc.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...) + allErrs = append(allErrs, validateSCCCapsAgainstDrops(scc.RequiredDropCapabilities, scc.AllowedCapabilities, field.NewPath("allowedCapabilities"))...) + + if hasCap(securityv1.AllowAllCapabilities, scc.AllowedCapabilities) && len(scc.RequiredDropCapabilities) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("requiredDropCapabilities"), scc.RequiredDropCapabilities, + "required capabilities must be empty when all capabilities are allowed by a wildcard")) + } + + allErrs = append(allErrs, validateSCCDefaultAllowPrivilegeEscalation(field.NewPath("defaultAllowPrivilegeEscalation"), scc.DefaultAllowPrivilegeEscalation, scc.AllowPrivilegeEscalation)...) + + allowsFlexVolumes := false + hasNoneVolume := false + + if len(scc.Volumes) > 0 { + for _, fsType := range scc.Volumes { + if fsType == securityv1.FSTypeNone { + hasNoneVolume = true + + } else if fsType == securityv1.FSTypeFlexVolume || fsType == securityv1.FSTypeAll { + allowsFlexVolumes = true + } + } + } + + if hasNoneVolume && len(scc.Volumes) > 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("volumes"), scc.Volumes, + "if 'none' is specified, no other values are allowed")) + } + + if len(scc.AllowedFlexVolumes) > 0 { + if allowsFlexVolumes { + for idx, allowedFlexVolume := range scc.AllowedFlexVolumes { + if len(allowedFlexVolume.Driver) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("allowedFlexVolumes").Index(idx).Child("driver"), + "must specify a driver")) + } + } + } else { + allErrs = append(allErrs, field.Invalid(field.NewPath("allowedFlexVolumes"), scc.AllowedFlexVolumes, + "volumes does not include 'flexVolume' or '*', so no flex volumes are allowed")) + } + } + + allowedUnsafeSysctlsPath := field.NewPath("allowedUnsafeSysctls") + forbiddenSysctlsPath := field.NewPath("forbiddenSysctls") + allErrs = append(allErrs, validateSCCSysctls(allowedUnsafeSysctlsPath, scc.AllowedUnsafeSysctls)...) + allErrs = append(allErrs, validateSCCSysctls(forbiddenSysctlsPath, scc.ForbiddenSysctls)...) 
+ allErrs = append(allErrs, validatePodSecurityPolicySysctlListsDoNotOverlap(allowedUnsafeSysctlsPath, forbiddenSysctlsPath, scc.AllowedUnsafeSysctls, scc.ForbiddenSysctls)...) + + return allErrs +} + +const sysctlPatternSegmentFmt string = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]" +const sysctlPatternFmt string = "(" + kapivalidation.SysctlSegmentFmt + "\\.)*" + sysctlPatternSegmentFmt + +var sysctlPatternRegexp = regexp.MustCompile("^" + sysctlPatternFmt + "$") + +func IsValidSysctlPattern(name string) bool { + if len(name) > kapivalidation.SysctlMaxLength { + return false + } + return sysctlPatternRegexp.MatchString(name) +} + +// validatePodSecurityPolicySysctlListsDoNotOverlap validates the values in forbiddenSysctls and allowedSysctls fields do not overlap. +func validatePodSecurityPolicySysctlListsDoNotOverlap(allowedSysctlsFldPath, forbiddenSysctlsFldPath *field.Path, allowedUnsafeSysctls, forbiddenSysctls []string) field.ErrorList { + allErrs := field.ErrorList{} + for i, allowedSysctl := range allowedUnsafeSysctls { + isAllowedSysctlPattern := false + allowedSysctlPrefix := "" + if strings.HasSuffix(allowedSysctl, "*") { + isAllowedSysctlPattern = true + allowedSysctlPrefix = strings.TrimSuffix(allowedSysctl, "*") + } + for j, forbiddenSysctl := range forbiddenSysctls { + isForbiddenSysctlPattern := false + forbiddenSysctlPrefix := "" + if strings.HasSuffix(forbiddenSysctl, "*") { + isForbiddenSysctlPattern = true + forbiddenSysctlPrefix = strings.TrimSuffix(forbiddenSysctl, "*") + } + switch { + case isAllowedSysctlPattern && isForbiddenSysctlPattern: + if strings.HasPrefix(allowedSysctlPrefix, forbiddenSysctlPrefix) { + allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl))) + } else if strings.HasPrefix(forbiddenSysctlPrefix, allowedSysctlPrefix) { + allErrs = append(allErrs, field.Invalid(forbiddenSysctlsFldPath.Index(j), forbiddenSysctls[j], fmt.Sprintf("sysctl overlaps with %v", allowedSysctl))) + } + case isAllowedSysctlPattern: + if strings.HasPrefix(forbiddenSysctl, allowedSysctlPrefix) { + allErrs = append(allErrs, field.Invalid(forbiddenSysctlsFldPath.Index(j), forbiddenSysctls[j], fmt.Sprintf("sysctl overlaps with %v", allowedSysctl))) + } + case isForbiddenSysctlPattern: + if strings.HasPrefix(allowedSysctl, forbiddenSysctlPrefix) { + allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl))) + } + default: + if allowedSysctl == forbiddenSysctl { + allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl))) + } + } + } + } + return allErrs +} + +// validatePodSecurityPolicySysctls validates the sysctls fields of PodSecurityPolicy. 
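+// Here the same checks are applied to an SCC's allowedUnsafeSysctls and forbiddenSysctls lists.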
+func validateSCCSysctls(fldPath *field.Path, sysctls []string) field.ErrorList { + allErrs := field.ErrorList{} + + if len(sysctls) == 0 { + return allErrs + } + + coversAll := false + for i, s := range sysctls { + if len(s) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), sysctls[i], fmt.Sprintf("empty sysctl not allowed"))) + } else if !IsValidSysctlPattern(string(s)) { + allErrs = append( + allErrs, + field.Invalid(fldPath.Index(i), sysctls[i], fmt.Sprintf("must have at most %d characters and match regex %s", + kapivalidation.SysctlMaxLength, + sysctlPatternFmt, + )), + ) + } else if s[0] == '*' { + coversAll = true + } + } + + if coversAll && len(sysctls) > 1 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("items"), fmt.Sprintf("if '*' is present, must not specify other sysctls"))) + } + + return allErrs +} + +// validateSCCCapsAgainstDrops ensures an allowed cap is not listed in the required drops. +func validateSCCCapsAgainstDrops(requiredDrops []corev1.Capability, capsToCheck []corev1.Capability, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if requiredDrops == nil { + return allErrs + } + for _, cap := range capsToCheck { + if hasCap(cap, requiredDrops) { + allErrs = append(allErrs, field.Invalid(fldPath, cap, + fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String()))) + } + } + return allErrs +} + +// validateSCCDefaultAllowPrivilegeEscalation validates the DefaultAllowPrivilegeEscalation field against the AllowPrivilegeEscalation field of a SecurityContextConstraints. +func validateSCCDefaultAllowPrivilegeEscalation(fldPath *field.Path, defaultAllowPrivilegeEscalation, allowPrivilegeEscalation *bool) field.ErrorList { + allErrs := field.ErrorList{} + if defaultAllowPrivilegeEscalation != nil && allowPrivilegeEscalation != nil && *defaultAllowPrivilegeEscalation && !*allowPrivilegeEscalation { + allErrs = append(allErrs, field.Invalid(fldPath, defaultAllowPrivilegeEscalation, "Cannot set DefaultAllowPrivilegeEscalation to true without also setting AllowPrivilegeEscalation to true")) + } + + return allErrs +} + +// hasCap checks for needle in haystack. +func hasCap(needle corev1.Capability, haystack []corev1.Capability) bool { + for _, c := range haystack { + if needle == c { + return true + } + } + return false +} + +// validateIDRanges ensures the range is valid. +func validateIDRanges(rng []securityv1.IDRange, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for i, r := range rng { + // if 0 <= Min <= Max then we do not need to validate max. It is always greater than or + // equal to 0 and Min. + minPath := fldPath.Child("ranges").Index(i).Child("min") + maxPath := fldPath.Child("ranges").Index(i).Child("max") + + if r.Min < 0 { + allErrs = append(allErrs, field.Invalid(minPath, r.Min, "min cannot be negative")) + } + if r.Max < 0 { + allErrs = append(allErrs, field.Invalid(maxPath, r.Max, "max cannot be negative")) + } + if r.Min > r.Max { + allErrs = append(allErrs, field.Invalid(minPath, r, "min cannot be greater than max")) + } + } + + return allErrs +} + +func ValidateSecurityContextConstraintsUpdate(newScc, oldScc *securityv1.SecurityContextConstraints) field.ErrorList { + allErrs := validation.ValidateObjectMetaUpdate(&newScc.ObjectMeta, &oldScc.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateSecurityContextConstraints(newScc)...) 
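+	// Update validation therefore covers both the ObjectMeta update rules and the full create-time validation of the new object.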
+ return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go new file mode 100644 index 0000000000000..01c4d472c0a22 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go @@ -0,0 +1,343 @@ +package validation + +import ( + "fmt" + "testing" + + kcorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + securityv1 "github.com/openshift/api/security/v1" +) + +func TestValidateSecurityContextConstraints(t *testing.T) { + var invalidUID int64 = -1 + var invalidPriority int32 = -1 + var validPriority int32 = 1 + yes := true + no := false + + validSCC := func() *securityv1.SecurityContextConstraints { + return &securityv1.SecurityContextConstraints{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + SELinuxContext: securityv1.SELinuxContextStrategyOptions{ + Type: securityv1.SELinuxStrategyRunAsAny, + }, + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyRunAsAny, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyRunAsAny, + }, + Priority: &validPriority, + } + } + + noUserOptions := validSCC() + noUserOptions.RunAsUser.Type = "" + + noSELinuxOptions := validSCC() + noSELinuxOptions.SELinuxContext.Type = "" + + invalidUserStratType := validSCC() + invalidUserStratType.RunAsUser.Type = "invalid" + + invalidSELinuxStratType := validSCC() + invalidSELinuxStratType.SELinuxContext.Type = "invalid" + + invalidUIDSCC := validSCC() + invalidUIDSCC.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAs + invalidUIDSCC.RunAsUser.UID = &invalidUID + + missingObjectMetaName := validSCC() + missingObjectMetaName.ObjectMeta.Name = "" + + noFSGroupOptions := validSCC() + noFSGroupOptions.FSGroup.Type = "" + + invalidFSGroupStratType := validSCC() + invalidFSGroupStratType.FSGroup.Type = "invalid" + + noSupplementalGroupsOptions := validSCC() + noSupplementalGroupsOptions.SupplementalGroups.Type = "" + + invalidSupGroupStratType := validSCC() + invalidSupGroupStratType.SupplementalGroups.Type = "invalid" + + invalidRangeMinGreaterThanMax := validSCC() + invalidRangeMinGreaterThanMax.FSGroup.Ranges = []securityv1.IDRange{ + {Min: 2, Max: 1}, + } + + invalidRangeNegativeMin := validSCC() + invalidRangeNegativeMin.FSGroup.Ranges = []securityv1.IDRange{ + {Min: -1, Max: 10}, + } + + invalidRangeNegativeMax := validSCC() + invalidRangeNegativeMax.FSGroup.Ranges = []securityv1.IDRange{ + {Min: 1, Max: -10}, + } + + negativePriority := validSCC() + negativePriority.Priority = &invalidPriority + + requiredCapAddAndDrop := validSCC() + requiredCapAddAndDrop.DefaultAddCapabilities = []kcorev1.Capability{"foo"} + requiredCapAddAndDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + + allowedCapListedInRequiredDrop := validSCC() + allowedCapListedInRequiredDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + allowedCapListedInRequiredDrop.AllowedCapabilities = []kcorev1.Capability{"foo"} + + wildcardAllowedCapAndRequiredDrop := validSCC() + wildcardAllowedCapAndRequiredDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + 
wildcardAllowedCapAndRequiredDrop.AllowedCapabilities = []kcorev1.Capability{securityv1.AllowAllCapabilities} + + emptyFlexDriver := validSCC() + emptyFlexDriver.Volumes = []securityv1.FSType{securityv1.FSTypeFlexVolume} + emptyFlexDriver.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{{}} + + nonEmptyFlexVolumes := validSCC() + nonEmptyFlexVolumes.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{{Driver: "example/driver"}} + + invalidDefaultAllowPrivilegeEscalation := validSCC() + invalidDefaultAllowPrivilegeEscalation.DefaultAllowPrivilegeEscalation = &yes + invalidDefaultAllowPrivilegeEscalation.AllowPrivilegeEscalation = &no + + invalidAllowedUnsafeSysctlPattern := validSCC() + invalidAllowedUnsafeSysctlPattern.AllowedUnsafeSysctls = []string{"a.*.b"} + + invalidForbiddenSysctlPattern := validSCC() + invalidForbiddenSysctlPattern.ForbiddenSysctls = []string{"a.*.b"} + + invalidOverlappingSysctls := validSCC() + invalidOverlappingSysctls.ForbiddenSysctls = []string{"kernel.*", "net.ipv4.ip_local_port_range"} + invalidOverlappingSysctls.AllowedUnsafeSysctls = []string{"kernel.shmmax", "net.ipv4.ip_local_port_range"} + + invalidDuplicatedSysctls := validSCC() + invalidDuplicatedSysctls.ForbiddenSysctls = []string{"net.ipv4.ip_local_port_range"} + invalidDuplicatedSysctls.AllowedUnsafeSysctls = []string{"net.ipv4.ip_local_port_range"} + + errorCases := map[string]struct { + scc *securityv1.SecurityContextConstraints + errorType field.ErrorType + errorDetail string + }{ + "no user options": { + scc: noUserOptions, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, MustRunAsNonRoot, MustRunAsRange, RunAsAny", + }, + "no selinux options": { + scc: noSELinuxOptions, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, RunAsAny", + }, + "no fsgroup options": { + scc: noFSGroupOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "no sup group options": { + scc: noSupplementalGroupsOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid user strategy type": { + scc: invalidUserStratType, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, MustRunAsNonRoot, MustRunAsRange, RunAsAny", + }, + "invalid selinux strategy type": { + scc: invalidSELinuxStratType, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. 
Valid values are MustRunAs, RunAsAny", + }, + "invalid sup group strategy type": { + scc: invalidSupGroupStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid fs group strategy type": { + scc: invalidFSGroupStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid uid": { + scc: invalidUIDSCC, + errorType: field.ErrorTypeInvalid, + errorDetail: "uid cannot be negative", + }, + "missing object meta name": { + scc: missingObjectMetaName, + errorType: field.ErrorTypeRequired, + errorDetail: "name or generateName is required", + }, + "invalid range min greater than max": { + scc: invalidRangeMinGreaterThanMax, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be greater than max", + }, + "invalid range negative min": { + scc: invalidRangeNegativeMin, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be negative", + }, + "invalid range negative max": { + scc: invalidRangeNegativeMax, + errorType: field.ErrorTypeInvalid, + errorDetail: "max cannot be negative", + }, + "negative priority": { + scc: negativePriority, + errorType: field.ErrorTypeInvalid, + errorDetail: "priority cannot be negative", + }, + "invalid required caps": { + scc: requiredCapAddAndDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "capability is listed in defaultAddCapabilities and requiredDropCapabilities", + }, + "allowed cap listed in required drops": { + scc: allowedCapListedInRequiredDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "capability is listed in allowedCapabilities and requiredDropCapabilities", + }, + "all caps allowed by a wildcard and required drops is not empty": { + scc: wildcardAllowedCapAndRequiredDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "required capabilities must be empty when all capabilities are allowed by a wildcard", + }, + "empty flex volume driver": { + scc: emptyFlexDriver, + errorType: field.ErrorTypeRequired, + errorDetail: "must specify a driver", + }, + "non-empty allowed flex volumes": { + scc: nonEmptyFlexVolumes, + errorType: field.ErrorTypeInvalid, + errorDetail: "volumes does not include 'flexVolume' or '*', so no flex volumes are allowed", + }, + "invalid defaultAllowPrivilegeEscalation": { + scc: invalidDefaultAllowPrivilegeEscalation, + errorType: field.ErrorTypeInvalid, + errorDetail: "Cannot set DefaultAllowPrivilegeEscalation to true without also setting AllowPrivilegeEscalation to true", + }, + "invalid allowed unsafe sysctl pattern": { + scc: invalidAllowedUnsafeSysctlPattern, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("must have at most 253 characters and match regex %s", sysctlPatternFmt), + }, + "invalid forbidden sysctl pattern": { + scc: invalidForbiddenSysctlPattern, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("must have at most 253 characters and match regex %s", sysctlPatternFmt), + }, + "invalid overlapping sysctl pattern": { + scc: invalidOverlappingSysctls, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("sysctl overlaps with %s", invalidOverlappingSysctls.ForbiddenSysctls[0]), + }, + "invalid duplicated sysctls": { + scc: invalidDuplicatedSysctls, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("sysctl overlaps with %s", invalidDuplicatedSysctls.AllowedUnsafeSysctls[0]), + }, + } + + for k, v := range errorCases { + t.Run(k, func(t *testing.T) { + if errs := 
ValidateSecurityContextConstraints(v.scc); len(errs) == 0 || errs[0].Type != v.errorType || errs[0].Detail != v.errorDetail { + t.Errorf("Expected error type %q with detail %q, got %v", v.errorType, v.errorDetail, errs) + } + }) + } + + var validUID int64 = 1 + + mustRunAs := validSCC() + mustRunAs.FSGroup.Type = securityv1.FSGroupStrategyMustRunAs + mustRunAs.SupplementalGroups.Type = securityv1.SupplementalGroupsStrategyMustRunAs + mustRunAs.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAs + mustRunAs.RunAsUser.UID = &validUID + mustRunAs.SELinuxContext.Type = securityv1.SELinuxStrategyMustRunAs + + runAsNonRoot := validSCC() + runAsNonRoot.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAsNonRoot + + caseInsensitiveAddDrop := validSCC() + caseInsensitiveAddDrop.DefaultAddCapabilities = []kcorev1.Capability{"foo"} + caseInsensitiveAddDrop.RequiredDropCapabilities = []kcorev1.Capability{"FOO"} + + caseInsensitiveAllowedDrop := validSCC() + caseInsensitiveAllowedDrop.RequiredDropCapabilities = []kcorev1.Capability{"FOO"} + caseInsensitiveAllowedDrop.AllowedCapabilities = []kcorev1.Capability{"foo"} + + flexvolumeWhenFlexVolumesAllowed := validSCC() + flexvolumeWhenFlexVolumesAllowed.Volumes = []securityv1.FSType{securityv1.FSTypeFlexVolume} + flexvolumeWhenFlexVolumesAllowed.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{ + {Driver: "example/driver1"}, + } + + flexvolumeWhenAllVolumesAllowed := validSCC() + flexvolumeWhenAllVolumesAllowed.Volumes = []securityv1.FSType{securityv1.FSTypeAll} + flexvolumeWhenAllVolumesAllowed.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{ + {Driver: "example/driver2"}, + } + + validDefaultAllowPrivilegeEscalation := validSCC() + validDefaultAllowPrivilegeEscalation.DefaultAllowPrivilegeEscalation = &yes + validDefaultAllowPrivilegeEscalation.AllowPrivilegeEscalation = &yes + + withForbiddenSysctl := validSCC() + withForbiddenSysctl.ForbiddenSysctls = []string{"net.*"} + + withAllowedUnsafeSysctl := validSCC() + withAllowedUnsafeSysctl.AllowedUnsafeSysctls = []string{"net.ipv4.tcp_max_syn_backlog"} + + successCases := map[string]struct { + scc *securityv1.SecurityContextConstraints + }{ + "must run as": { + scc: mustRunAs, + }, + "run as any": { + scc: validSCC(), + }, + "run as non-root (user only)": { + scc: runAsNonRoot, + }, + "comparison for add -> drop is case sensitive": { + scc: caseInsensitiveAddDrop, + }, + "comparison for allowed -> drop is case sensitive": { + scc: caseInsensitiveAllowedDrop, + }, + "allow white-listed flexVolume when flex volumes are allowed": { + scc: flexvolumeWhenFlexVolumesAllowed, + }, + "allow white-listed flexVolume when all volumes are allowed": { + scc: flexvolumeWhenAllVolumesAllowed, + }, + "valid defaultAllowPrivilegeEscalation as true": { + scc: validDefaultAllowPrivilegeEscalation, + }, + "with network sysctls forbidden": { + scc: withForbiddenSysctl, + }, + "with unsafe net.ipv4.tcp_max_syn_backlog sysctl allowed": { + scc: withAllowedUnsafeSysctl, + }, + } + + for k, v := range successCases { + if errs := ValidateSecurityContextConstraints(v.scc); len(errs) != 0 { + t.Errorf("Expected success for %q, got %v", k, errs) + } + } +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/decorator.go b/openshift-kube-apiserver/admission/namespaceconditions/decorator.go new file mode 100644 index 0000000000000..02d7fa357cf71 --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/decorator.go @@ -0,0 +1,91 @@ +package namespaceconditions + +import ( + 
"k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1lister "k8s.io/client-go/listers/core/v1" +) + +// this is a list of namespaces with special meaning. The kube ones are here in particular because +// we don't control their creation or labeling on their creation +var runLevelZeroNamespaces = sets.NewString("default", "kube-system", "kube-public") +var runLevelOneNamespaces = sets.NewString("openshift-node", "openshift-infra", "openshift") + +func init() { + runLevelOneNamespaces.Insert(runLevelZeroNamespaces.List()...) +} + +// NamespaceLabelConditions provides a decorator that can delegate and conditionally add label conditions +type NamespaceLabelConditions struct { + NamespaceClient corev1client.NamespacesGetter + NamespaceLister corev1lister.NamespaceLister + + SkipLevelZeroNames sets.String + SkipLevelOneNames sets.String +} + +func (d *NamespaceLabelConditions) WithNamespaceLabelConditions(admissionPlugin admission.Interface, name string) admission.Interface { + switch { + case d.SkipLevelOneNames.Has(name): + // return a decorated admission plugin that skips runlevel 0 and 1 namespaces based on name (for known values) and + // label. + return &pluginHandlerWithNamespaceNameConditions{ + admissionPlugin: &pluginHandlerWithNamespaceLabelConditions{ + admissionPlugin: admissionPlugin, + namespaceClient: d.NamespaceClient, + namespaceLister: d.NamespaceLister, + namespaceSelector: skipRunLevelOneSelector, + }, + namespacesToExclude: runLevelOneNamespaces, + } + + case d.SkipLevelZeroNames.Has(name): + // return a decorated admission plugin that skips runlevel 0 namespaces based on name (for known values) and + // label. + return &pluginHandlerWithNamespaceNameConditions{ + admissionPlugin: &pluginHandlerWithNamespaceLabelConditions{ + admissionPlugin: admissionPlugin, + namespaceClient: d.NamespaceClient, + namespaceLister: d.NamespaceLister, + namespaceSelector: skipRunLevelZeroSelector, + }, + namespacesToExclude: runLevelZeroNamespaces, + } + + default: + return admissionPlugin + } +} + +// NamespaceLabelSelector provides a decorator that delegates +type NamespaceLabelSelector struct { + namespaceClient corev1client.NamespacesGetter + namespaceLister corev1lister.NamespaceLister + + admissionPluginNamesToDecorate sets.String + namespaceLabelSelector labels.Selector +} + +func NewConditionalAdmissionPlugins(nsClient corev1client.NamespacesGetter, nsLister corev1lister.NamespaceLister, nsSelector labels.Selector, admissionPluginNames ...string) *NamespaceLabelSelector { + return &NamespaceLabelSelector{ + namespaceClient: nsClient, + namespaceLister: nsLister, + admissionPluginNamesToDecorate: sets.NewString(admissionPluginNames...), + namespaceLabelSelector: nsSelector, + } +} + +func (d *NamespaceLabelSelector) WithNamespaceLabelSelector(admissionPlugin admission.Interface, name string) admission.Interface { + if !d.admissionPluginNamesToDecorate.Has(name) { + return admissionPlugin + } + + return &pluginHandlerWithNamespaceLabelConditions{ + admissionPlugin: admissionPlugin, + namespaceClient: d.namespaceClient, + namespaceLister: d.namespaceLister, + namespaceSelector: d.namespaceLabelSelector, + } +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go new file mode 100644 index 0000000000000..c3ebaf5895306 --- /dev/null +++ 
b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go @@ -0,0 +1,125 @@ +package namespaceconditions + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1lister "k8s.io/client-go/listers/core/v1" +) + +const runLevelLabel = "openshift.io/run-level" + +var ( + skipRunLevelZeroSelector labels.Selector + skipRunLevelOneSelector labels.Selector +) + +func init() { + var err error + skipRunLevelZeroSelector, err = labels.Parse(runLevelLabel + " notin ( 0 )") + if err != nil { + panic(err) + } + skipRunLevelOneSelector, err = labels.Parse(runLevelLabel + " notin ( 0,1 )") + if err != nil { + panic(err) + } +} + +// pluginHandlerWithNamespaceLabelConditions wraps an admission plugin in a conditional skip based on namespace labels +type pluginHandlerWithNamespaceLabelConditions struct { + admissionPlugin admission.Interface + namespaceClient corev1client.NamespacesGetter + namespaceLister corev1lister.NamespaceLister + namespaceSelector labels.Selector +} + +var _ admission.ValidationInterface = &pluginHandlerWithNamespaceLabelConditions{} +var _ admission.MutationInterface = &pluginHandlerWithNamespaceLabelConditions{} + +func (p pluginHandlerWithNamespaceLabelConditions) Handles(operation admission.Operation) bool { + return p.admissionPlugin.Handles(operation) +} + +// Admit performs a mutating admission control check and emits metrics. +func (p pluginHandlerWithNamespaceLabelConditions) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + if !p.shouldRunAdmission(a) { + return nil + } + + mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface) + if !ok { + return nil + } + return mutatingHandler.Admit(ctx, a, o) +} + +// Validate performs a non-mutating admission control check and emits metrics. +func (p pluginHandlerWithNamespaceLabelConditions) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + if !p.shouldRunAdmission(a) { + return nil + } + + validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface) + if !ok { + return nil + } + return validatingHandler.Validate(ctx, a, o) +} + +// shouldRunAdmission decides whether the request's namespace matches the +// configured namespaceSelector. Only when it matches is the wrapped admission plugin invoked. +func (p pluginHandlerWithNamespaceLabelConditions) shouldRunAdmission(attr admission.Attributes) bool { + namespaceName := attr.GetNamespace() + if len(namespaceName) == 0 && attr.GetResource().Resource != "namespaces" { + // cluster scoped resources always run admission + return true + } + namespaceLabels, err := p.getNamespaceLabels(attr) + if err != nil { + // default to running the hook so we don't leak namespace existence information + return true + } + // TODO: add an LRU cache to cache the match decision + return p.namespaceSelector.Matches(labels.Set(namespaceLabels)) +} + +// getNamespaceLabels gets the labels of the namespace related to the attr. +func (p pluginHandlerWithNamespaceLabelConditions) getNamespaceLabels(attr admission.Attributes) (map[string]string, error) { + // If the request itself is creating or updating a namespace, then get the + // labels from attr.Object, because namespaceLister doesn't have the latest + // namespace yet.
+ // + // However, if the request is deleting a namespace, then get the label from + // the namespace in the namespaceLister, because a delete request is not + // going to change the object, and attr.Object will be a DeleteOptions + // rather than a namespace object. + if attr.GetResource().Resource == "namespaces" && + len(attr.GetSubresource()) == 0 && + (attr.GetOperation() == admission.Create || attr.GetOperation() == admission.Update) { + accessor, err := meta.Accessor(attr.GetObject()) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil + } + + namespaceName := attr.GetNamespace() + namespace, err := p.namespaceLister.Get(namespaceName) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + if apierrors.IsNotFound(err) { + // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not + namespace, err = p.namespaceClient.Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + } + return namespace.Labels, nil +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go new file mode 100644 index 0000000000000..31474a4b7ee93 --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go @@ -0,0 +1,97 @@ +package namespaceconditions + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +type fakeNamespaceLister struct { + namespaces map[string]*corev1.Namespace +} + +func (f fakeNamespaceLister) List(selector labels.Selector) (ret []*corev1.Namespace, err error) { + return nil, nil +} +func (f fakeNamespaceLister) Get(name string) (*corev1.Namespace, error) { + ns, ok := f.namespaces[name] + if ok { + return ns, nil + } + return nil, errors.NewNotFound(corev1.Resource("namespaces"), name) +} + +func TestGetNamespaceLabels(t *testing.T) { + namespace1Labels := map[string]string{ + "runlevel": "1", + } + namespace1 := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "1", + Labels: namespace1Labels, + }, + } + namespace2Labels := map[string]string{ + "runlevel": "2", + } + namespace2 := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "2", + Labels: namespace2Labels, + }, + } + namespaceLister := fakeNamespaceLister{map[string]*corev1.Namespace{ + "1": &namespace1, + }, + } + + tests := []struct { + name string + attr admission.Attributes + expectedLabels map[string]string + }{ + { + name: "request is for creating namespace, the labels should be from the object itself", + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, "", namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Create, nil, false, nil), + expectedLabels: namespace2Labels, + }, + { + name: "request is for updating namespace, the labels should be from the new object", + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, namespace2.Name, namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Update, nil, false, nil), + expectedLabels: namespace2Labels, + }, + { + name: "request is for deleting namespace, the labels should be from the cache", + attr: admission.NewAttributesRecord(&namespace2, 
nil, schema.GroupVersionKind{}, namespace1.Name, namespace1.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Delete, nil, false, nil), + expectedLabels: namespace1Labels, + }, + { + name: "request is for namespace/finalizer", + attr: admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "namespaces"}, "finalizers", admission.Create, nil, false, nil), + expectedLabels: namespace1Labels, + }, + { + name: "request is for pod", + attr: admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "pods"}, "", admission.Create, nil, false, nil), + expectedLabels: namespace1Labels, + }, + } + matcher := pluginHandlerWithNamespaceLabelConditions{ + namespaceLister: namespaceLister, + } + for _, tt := range tests { + actualLabels, err := matcher.getNamespaceLabels(tt.attr) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(actualLabels, tt.expectedLabels) { + t.Errorf("expected labels to be %#v, got %#v", tt.expectedLabels, actualLabels) + } + } +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go b/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go new file mode 100644 index 0000000000000..848cef4d13ac4 --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go @@ -0,0 +1,60 @@ +package namespaceconditions + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" +) + +// pluginHandlerWithNamespaceNameConditions skips running admission plugins if they deal in the namespaceToExclude list +type pluginHandlerWithNamespaceNameConditions struct { + admissionPlugin admission.Interface + namespacesToExclude sets.String +} + +var _ admission.ValidationInterface = &pluginHandlerWithNamespaceNameConditions{} +var _ admission.MutationInterface = &pluginHandlerWithNamespaceNameConditions{} + +func (p pluginHandlerWithNamespaceNameConditions) Handles(operation admission.Operation) bool { + return p.admissionPlugin.Handles(operation) +} + +// Admit performs a mutating admission control check and emit metrics. +func (p pluginHandlerWithNamespaceNameConditions) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + if !p.shouldRunAdmission(a) { + return nil + } + + mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface) + if !ok { + return nil + } + return mutatingHandler.Admit(ctx, a, o) +} + +// Validate performs a non-mutating admission control check and emits metrics. 
+func (p pluginHandlerWithNamespaceNameConditions) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + if !p.shouldRunAdmission(a) { + return nil + } + + validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface) + if !ok { + return nil + } + return validatingHandler.Validate(ctx, a, o) +} + +func (p pluginHandlerWithNamespaceNameConditions) shouldRunAdmission(attr admission.Attributes) bool { + namespaceName := attr.GetNamespace() + if p.namespacesToExclude.Has(namespaceName) { + return false + } + if (attr.GetResource().GroupResource() == schema.GroupResource{Resource: "namespaces"}) && p.namespacesToExclude.Has(attr.GetName()) { + return false + } + + return true +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go new file mode 100644 index 0000000000000..4ef9330be1224 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package externalipranger is the internal version of the API. +package externalipranger diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go new file mode 100644 index 0000000000000..fe92abf523c1e --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go @@ -0,0 +1,20 @@ +package externalipranger + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: runtime.APIVersionInternal} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ExternalIPRangerAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go new file mode 100644 index 0000000000000..f127ca27aadcb --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go @@ -0,0 +1,20 @@ +package externalipranger + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestrictedEndpointsAdmissionConfig is the configuration for which CIDRs services can't manage +type ExternalIPRangerAdmissionConfig struct { + metav1.TypeMeta + + // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that + // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You + // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons. 
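+ // For example, a hypothetical value of ["!172.30.1.0/24", "172.30.0.0/16"] rejects
+ // addresses in 172.30.1.0/24 while admitting the rest of 172.30.0.0/16, because
+ // rejection rules are evaluated before the allowed CIDRs.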
+ ExternalIPNetworkCIDRs []string + // AllowIngressIP indicates that ingress IPs should be allowed + AllowIngressIP bool +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go new file mode 100644 index 0000000000000..79476f394930a --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go new file mode 100644 index 0000000000000..f55b5a5b494df --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go @@ -0,0 +1,24 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + externalipranger.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ExternalIPRangerAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go new file mode 100644 index 0000000000000..0fb8ea4ca830a --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go @@ -0,0 +1,20 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExternalIPRangerAdmissionConfig is the configuration for the ExternalIPRanger admission plugin, which controls the external IP values that services may use +type ExternalIPRangerAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that + // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You + // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons. + ExternalIPNetworkCIDRs []string `json:"externalIPNetworkCIDRs"` + // AllowIngressIP indicates that ingress IPs should be allowed + AllowIngressIP bool `json:"allowIngressIP"` +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go new file mode 100644 index 0000000000000..ff46fb9f13d76 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package restrictedendpoints is the internal version of the API.
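+// The RestrictedEndpointsAdmission plugin decodes its configuration into this
+// internal version after converting from the versioned (v1) type.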
+package restrictedendpoints diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go new file mode 100644 index 0000000000000..171a4b1be5182 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go @@ -0,0 +1,20 @@ +package restrictedendpoints + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: runtime.APIVersionInternal} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RestrictedEndpointsAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go new file mode 100644 index 0000000000000..e205762215ba1 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go @@ -0,0 +1,15 @@ +package restrictedendpoints + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestrictedEndpointsAdmissionConfig is the configuration for which CIDRs services can't manage +type RestrictedEndpointsAdmissionConfig struct { + metav1.TypeMeta + + // RestrictedCIDRs indicates what CIDRs will be disallowed for services. + RestrictedCIDRs []string +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go new file mode 100644 index 0000000000000..0dac22208df49 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints + +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go new file mode 100644 index 0000000000000..f924353fe24d3 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go @@ -0,0 +1,24 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + restrictedendpoints.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RestrictedEndpointsAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go new file mode 100644 index 0000000000000..f665aa1e73c2f --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go @@ -0,0 +1,15 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestrictedEndpointsAdmissionConfig is the configuration for which CIDRs services can't manage +type RestrictedEndpointsAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // RestrictedCIDRs indicates what CIDRs will be disallowed for services. + RestrictedCIDRs []string `json:"restrictedCIDRs"` +} diff --git a/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go new file mode 100644 index 0000000000000..63c26a833c8f0 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go @@ -0,0 +1,209 @@ +package externalipranger + +import ( + "context" + "fmt" + "io" + "net" + "strings" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger/v1" + kapi "k8s.io/kubernetes/pkg/apis/core" + netutils "k8s.io/utils/net" +) + +const ExternalIPPluginName = "network.openshift.io/ExternalIPRanger" + +func RegisterExternalIP(plugins *admission.Plugins) { + plugins.Register("network.openshift.io/ExternalIPRanger", + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", ExternalIPPluginName) + return nil, nil + } + + // this needs to be moved upstream to be part of core config + reject, admit, err := ParseRejectAdmitCIDRRules(pluginConfig.ExternalIPNetworkCIDRs) + if err != nil { + // should have been caught with validation + return nil, err + } + + return NewExternalIPRanger(reject, admit, pluginConfig.AllowIngressIP), nil + }) +} + +func 
readConfig(reader io.Reader) (*externalipranger.ExternalIPRangerAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, externalipranger.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*externalipranger.ExternalIPRangerAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just list of strings + return config, nil +} + +type externalIPRanger struct { + *admission.Handler + reject []*net.IPNet + admit []*net.IPNet + authorizer authorizer.Authorizer + allowIngressIP bool +} + +var _ admission.Interface = &externalIPRanger{} +var _ admission.ValidationInterface = &externalIPRanger{} +var _ = initializer.WantsAuthorizer(&externalIPRanger{}) + +// ParseRejectAdmitCIDRRules calculates a blacklist and whitelist from a list of string CIDR rules (treating +// a leading ! as a negation). Returns an error if any rule is invalid. +func ParseRejectAdmitCIDRRules(rules []string) (reject, admit []*net.IPNet, err error) { + for _, s := range rules { + negate := false + if strings.HasPrefix(s, "!") { + negate = true + s = s[1:] + } + _, cidr, err := netutils.ParseCIDRSloppy(s) + if err != nil { + return nil, nil, err + } + if negate { + reject = append(reject, cidr) + } else { + admit = append(admit, cidr) + } + } + return reject, admit, nil +} + +// NewConstraint creates a new SCC constraint admission plugin. +func NewExternalIPRanger(reject, admit []*net.IPNet, allowIngressIP bool) *externalIPRanger { + return &externalIPRanger{ + Handler: admission.NewHandler(admission.Create, admission.Update), + reject: reject, + admit: admit, + allowIngressIP: allowIngressIP, + } +} + +func (r *externalIPRanger) SetAuthorizer(a authorizer.Authorizer) { + r.authorizer = a +} + +func (r *externalIPRanger) ValidateInitialization() error { + if r.authorizer == nil { + return fmt.Errorf("missing authorizer") + } + return nil +} + +// NetworkSlice is a helper for checking whether an IP is contained in a range +// of networks. +type NetworkSlice []*net.IPNet + +func (s NetworkSlice) Contains(ip net.IP) bool { + for _, cidr := range s { + if cidr.Contains(ip) { + return true + } + } + return false +} + +// Admit determines if the service should be admitted based on the configured network CIDR. +func (r *externalIPRanger) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if a.GetResource().GroupResource() != kapi.Resource("services") { + return nil + } + + svc, ok := a.GetObject().(*kapi.Service) + // if we can't convert then we don't handle this object so just return + if !ok { + return nil + } + + // Determine if an ingress ip address should be allowed as an + // external ip by checking the loadbalancer status of the previous + // object state. Only updates need to be validated against the + // ingress ip since the loadbalancer status cannot be set on + // create. 
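+ // If the previous object carried a load balancer ingress IP, remember it so that
+ // the checks below can treat that single address as implicitly allowed.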
+ ingressIP := "" + retrieveIngressIP := a.GetOperation() == admission.Update && + r.allowIngressIP && svc.Spec.Type == kapi.ServiceTypeLoadBalancer + if retrieveIngressIP { + old, ok := a.GetOldObject().(*kapi.Service) + ipPresent := ok && old != nil && len(old.Status.LoadBalancer.Ingress) > 0 + if ipPresent { + ingressIP = old.Status.LoadBalancer.Ingress[0].IP + } + } + + var errs field.ErrorList + switch { + // administrator disabled externalIPs + case len(svc.Spec.ExternalIPs) > 0 && len(r.admit) == 0: + onlyIngressIP := len(svc.Spec.ExternalIPs) == 1 && svc.Spec.ExternalIPs[0] == ingressIP + if !onlyIngressIP { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs"), "externalIPs have been disabled")) + } + // administrator has limited the range + case len(svc.Spec.ExternalIPs) > 0 && len(r.admit) > 0: + for i, s := range svc.Spec.ExternalIPs { + ip := netutils.ParseIPSloppy(s) + if ip == nil { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs").Index(i), "externalIPs must be a valid address")) + continue + } + notIngressIP := s != ingressIP + if (NetworkSlice(r.reject).Contains(ip) || !NetworkSlice(r.admit).Contains(ip)) && notIngressIP { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs").Index(i), "externalIP is not allowed")) + continue + } + } + } + + if len(errs) > 0 { + //if there are errors reported, resort to RBAC check to see + //if this is an admin user who can over-ride the check + allow, err := r.checkAccess(ctx, a) + if err != nil { + return err + } + if !allow { + return admission.NewForbidden(a, errs.ToAggregate()) + } + } + + return nil +} + +func (r *externalIPRanger) checkAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Resource: "service", + Subresource: "externalips", + APIGroup: "network.openshift.io", + ResourceRequest: true, + } + authorized, _, err := r.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} diff --git a/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go new file mode 100644 index 0000000000000..c29e3abe4f01e --- /dev/null +++ b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go @@ -0,0 +1,322 @@ +package externalipranger + +import ( + "context" + "fmt" + "net" + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + netutils "k8s.io/utils/net" + + "k8s.io/apiserver/pkg/authentication/user" + + "k8s.io/apiserver/pkg/authentication/serviceaccount" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +type fakeTestAuthorizer struct { + t *testing.T +} + +func fakeAuthorizer(t *testing.T) authorizer.Authorizer { + return &fakeTestAuthorizer{ + t: t, + } +} + +func (a *fakeTestAuthorizer) Authorize(_ context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + ui := attributes.GetUser() + if ui == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("No valid UserInfo for Context") + } + // system:serviceaccount:test:admin user aka admin user is allowed to set + // external IPs + if ui.GetName() == "system:serviceaccount:test:admin" { + return authorizer.DecisionAllow, "", nil + } + // Non test:admin user aka without admin privileges: + 
return authorizer.DecisionDeny, "", nil +} + +// TestAdmission verifies various scenarios involving pod/project/global node label selectors +func TestAdmission(t *testing.T) { + svc := &kapi.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + } + var oldSvc *kapi.Service + + _, ipv4, err := netutils.ParseCIDRSloppy("172.0.0.0/16") + if err != nil { + t.Fatal(err) + } + _, ipv4subset, err := netutils.ParseCIDRSloppy("172.0.1.0/24") + if err != nil { + t.Fatal(err) + } + _, ipv4offset, err := netutils.ParseCIDRSloppy("172.200.0.0/24") + if err != nil { + t.Fatal(err) + } + _, none, err := netutils.ParseCIDRSloppy("0.0.0.0/32") + if err != nil { + t.Fatal(err) + } + _, all, err := netutils.ParseCIDRSloppy("0.0.0.0/0") + if err != nil { + t.Fatal(err) + } + + tests := []struct { + testName string + rejects, admits []*net.IPNet + op admission.Operation + externalIPs []string + admit bool + errFn func(err error) bool + loadBalancer bool + ingressIP string + userinfo user.Info + }{ + { + admit: true, + op: admission.Create, + testName: "No external IPs on create for test:ordinary-user user", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + op: admission.Update, + testName: "No external IPs on update for test:admin user", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "No external IPs allowed on create for test:ordinary-user user", + errFn: func(err error) bool { return strings.Contains(err.Error(), "externalIPs have been disabled") }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "External IPs allowed on create for test:admin user", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "No external IPs allowed on update", + errFn: func(err error) bool { return strings.Contains(err.Error(), "externalIPs have been disabled") }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "External IPs allowed on update for test:admin user", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "IP out of range on create", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "IP out of range on update", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + rejects: []*net.IPNet{ipv4subset}, + externalIPs: []string{"172.0.1.1"}, + op: admission.Update, + testName: "IP out of range due to blacklist", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: 
serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + rejects: []*net.IPNet{ipv4offset}, + externalIPs: []string{"172.199.1.1"}, + op: admission.Update, + testName: "IP not in reject or admit", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"172.0.0.1"}, + op: admission.Create, + testName: "IP in range on create for test:ordinary-user user", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"172.0.0.1"}, + op: admission.Update, + testName: "IP in range on update for test:admin user", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + // other checks + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"abcd"}, + op: admission.Create, + testName: "IP unparseable on create", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIPs must be a valid address") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{none}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "IP range is empty for test:ordinary-user user", + errFn: func(err error) bool { return strings.Contains(err.Error(), "externalIP is not allowed") }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{none}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "IP range is empty, but test:admin user allowed", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + rejects: []*net.IPNet{all}, + admits: []*net.IPNet{all}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "rejections can cover the entire range", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + // Ingress IP checks + { + admit: true, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "Ingress ip allowed when external ips are disabled", + loadBalancer: true, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4", "172.0.0.1"}, + op: admission.Update, + testName: "Ingress ip allowed when external ips are enabled", + loadBalancer: true, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4", "172.0.0.1"}, + op: admission.Update, + testName: "Ingress ip not allowed for non-lb service", + loadBalancer: false, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + } + for _, test := range tests { + svc.Spec.ExternalIPs = test.externalIPs + allowIngressIP := len(test.ingressIP) > 0 || test.loadBalancer + handler := NewExternalIPRanger(test.rejects, test.admits, allowIngressIP) + handler.SetAuthorizer(fakeAuthorizer(t)) + err := handler.ValidateInitialization() + if err != nil { + t.Errorf("%s: Got an error %s", test.testName, err) + continue + } + if test.loadBalancer { + svc.Spec.Type = kapi.ServiceTypeLoadBalancer + } 
else { + svc.Spec.Type = kapi.ServiceTypeClusterIP + } + + if len(test.ingressIP) > 0 { + // Provide an ingress ip via the previous object state + oldSvc = &kapi.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Status: kapi.ServiceStatus{ + LoadBalancer: kapi.LoadBalancerStatus{ + Ingress: []kapi.LoadBalancerIngress{ + { + IP: test.ingressIP, + }, + }, + }, + }, + } + + } else { + oldSvc = nil + } + + err = handler.Validate(context.TODO(), admission.NewAttributesRecord(svc, oldSvc, kapi.Kind("Service").WithVersion("version"), "namespace", svc.ObjectMeta.Name, kapi.Resource("services").WithVersion("version"), "", test.op, nil, false, test.userinfo), nil) + + if test.admit && err != nil { + t.Errorf("%s: expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("%s: expected an error", test.testName) + } + if test.errFn != nil && !test.errFn(err) { + t.Errorf("%s: unexpected error: %v", test.testName, err) + } + } +} + +func TestHandles(t *testing.T) { + for op, shouldHandle := range map[admission.Operation]bool{ + admission.Create: true, + admission.Update: true, + admission.Connect: false, + admission.Delete: false, + } { + ranger := NewExternalIPRanger(nil, nil, false) + if e, a := shouldHandle, ranger.Handles(op); e != a { + t.Errorf("%v: shouldHandle=%t, handles=%t", op, e, a) + } + } +} diff --git a/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go b/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go new file mode 100644 index 0000000000000..b61b2a0bd62ed --- /dev/null +++ b/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go @@ -0,0 +1,292 @@ +package restrictedendpoints + +import ( + "context" + "fmt" + "io" + "net" + "reflect" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/discovery" + netutils "k8s.io/utils/net" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1" +) + +const RestrictedEndpointsPluginName = "network.openshift.io/RestrictedEndpointsAdmission" + +func RegisterRestrictedEndpoints(plugins *admission.Plugins) { + plugins.Register(RestrictedEndpointsPluginName, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", RestrictedEndpointsPluginName) + return nil, nil + } + restrictedNetworks, err := ParseSimpleCIDRRules(pluginConfig.RestrictedCIDRs) + if err != nil { + // should have been caught with validation + return nil, err + } + + return NewRestrictedEndpointsAdmission(restrictedNetworks), nil + }) +} + +func readConfig(reader io.Reader) (*restrictedendpoints.RestrictedEndpointsAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, restrictedendpoints.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*restrictedendpoints.RestrictedEndpointsAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just 
list of strings + return config, nil +} + +type restrictedEndpointsAdmission struct { + *admission.Handler + + authorizer authorizer.Authorizer + restrictedNetworks []*net.IPNet +} + +var _ = initializer.WantsAuthorizer(&restrictedEndpointsAdmission{}) +var _ = admission.ValidationInterface(&restrictedEndpointsAdmission{}) + +// ParseSimpleCIDRRules parses a list of CIDR strings +func ParseSimpleCIDRRules(rules []string) (networks []*net.IPNet, err error) { + for _, s := range rules { + _, cidr, err := netutils.ParseCIDRSloppy(s) + if err != nil { + return nil, err + } + networks = append(networks, cidr) + } + return networks, nil +} + +// NewRestrictedEndpointsAdmission creates a new endpoints admission plugin. +func NewRestrictedEndpointsAdmission(restrictedNetworks []*net.IPNet) *restrictedEndpointsAdmission { + return &restrictedEndpointsAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + restrictedNetworks: restrictedNetworks, + } +} + +func (r *restrictedEndpointsAdmission) SetAuthorizer(a authorizer.Authorizer) { + r.authorizer = a +} + +func (r *restrictedEndpointsAdmission) ValidateInitialization() error { + if r.authorizer == nil { + return fmt.Errorf("missing authorizer") + } + return nil +} + +var ( + defaultRestrictedPorts = []kapi.EndpointPort{ + // MCS ports + {Protocol: kapi.ProtocolTCP, Port: 22623}, + {Protocol: kapi.ProtocolTCP, Port: 22624}, + } + defaultRestrictedNetworks = []*net.IPNet{ + // IPv4 link-local range 169.254.0.0/16 (including cloud metadata IP) + {IP: netutils.ParseIPSloppy("169.254.0.0"), Mask: net.CIDRMask(16, 32)}, + } +) + +func checkRestrictedIP(ipString string, restricted []*net.IPNet) error { + ip := netutils.ParseIPSloppy(ipString) + if ip == nil { + return nil + } + for _, net := range restricted { + if net.Contains(ip) { + return fmt.Errorf("endpoint address %s is not allowed", ipString) + } + } + return nil +} + +func checkRestrictedPort(protocol kapi.Protocol, port int32, restricted []kapi.EndpointPort) error { + for _, rport := range restricted { + if protocol == rport.Protocol && port == rport.Port { + return fmt.Errorf("endpoint port %s:%d is not allowed", protocol, port) + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) endpointsFindRestrictedIP(ep *kapi.Endpoints, restricted []*net.IPNet) error { + for _, subset := range ep.Subsets { + for _, addr := range subset.Addresses { + if err := checkRestrictedIP(addr.IP, restricted); err != nil { + return err + } + } + for _, addr := range subset.NotReadyAddresses { + if err := checkRestrictedIP(addr.IP, restricted); err != nil { + return err + } + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) endpointsFindRestrictedPort(ep *kapi.Endpoints, restricted []kapi.EndpointPort) error { + for _, subset := range ep.Subsets { + for _, port := range subset.Ports { + if err := checkRestrictedPort(port.Protocol, port.Port, restricted); err != nil { + return err + } + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) endpointsCheckAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Namespace: attr.GetNamespace(), + Resource: "endpoints", + Subresource: "restricted", + APIGroup: kapi.GroupName, + Name: attr.GetName(), + ResourceRequest: true, + } + authorized, _, err := r.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} + +func (r *restrictedEndpointsAdmission) 
endpointsValidate(ctx context.Context, a admission.Attributes) error { + ep, ok := a.GetObject().(*kapi.Endpoints) + if !ok { + return nil + } + old, ok := a.GetOldObject().(*kapi.Endpoints) + if ok && reflect.DeepEqual(ep.Subsets, old.Subsets) { + return nil + } + + restrictedErr := r.endpointsFindRestrictedIP(ep, r.restrictedNetworks) + if restrictedErr == nil { + restrictedErr = r.endpointsFindRestrictedIP(ep, defaultRestrictedNetworks) + } + if restrictedErr == nil { + restrictedErr = r.endpointsFindRestrictedPort(ep, defaultRestrictedPorts) + } + if restrictedErr == nil { + return nil + } + + allow, err := r.endpointsCheckAccess(ctx, a) + if err != nil { + return err + } + if !allow { + return admission.NewForbidden(a, restrictedErr) + } + return nil +} + +func (r *restrictedEndpointsAdmission) sliceFindRestrictedIP(slice *discovery.EndpointSlice, restricted []*net.IPNet) error { + for _, endpoint := range slice.Endpoints { + for _, addr := range endpoint.Addresses { + if err := checkRestrictedIP(addr, restricted); err != nil { + return err + } + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) sliceFindRestrictedPort(slice *discovery.EndpointSlice, restricted []kapi.EndpointPort) error { + for _, port := range slice.Ports { + if port.Port == nil { + continue + } + sliceProtocol := kapi.ProtocolTCP + if port.Protocol != nil { + sliceProtocol = *port.Protocol + } + if err := checkRestrictedPort(sliceProtocol, *port.Port, restricted); err != nil { + return err + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) sliceCheckAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Namespace: attr.GetNamespace(), + Resource: "endpointslices", + Subresource: "restricted", + APIGroup: discovery.GroupName, + Name: attr.GetName(), + ResourceRequest: true, + } + authorized, _, err := r.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} + +func (r *restrictedEndpointsAdmission) sliceValidate(ctx context.Context, a admission.Attributes) error { + slice, ok := a.GetObject().(*discovery.EndpointSlice) + if !ok { + return nil + } + old, ok := a.GetOldObject().(*discovery.EndpointSlice) + if ok && reflect.DeepEqual(slice.Endpoints, old.Endpoints) && reflect.DeepEqual(slice.Ports, old.Ports) { + return nil + } + + restrictedErr := r.sliceFindRestrictedIP(slice, r.restrictedNetworks) + if restrictedErr == nil { + restrictedErr = r.sliceFindRestrictedIP(slice, defaultRestrictedNetworks) + } + if restrictedErr == nil { + restrictedErr = r.sliceFindRestrictedPort(slice, defaultRestrictedPorts) + } + if restrictedErr == nil { + return nil + } + + allow, err := r.sliceCheckAccess(ctx, a) + if err != nil { + return err + } + if !allow { + return admission.NewForbidden(a, restrictedErr) + } + return nil +} + +// Validate determines if the endpoints or endpointslice object should be admitted +func (r *restrictedEndpointsAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if a.GetResource().GroupResource() == kapi.Resource("endpoints") { + return r.endpointsValidate(ctx, a) + } else if a.GetResource().GroupResource() == discovery.Resource("endpointslices") { + return r.sliceValidate(ctx, a) + } else { + return nil + } +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/doc.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/doc.go new file mode 
100644 index 0000000000000..1e09e2208b6a2 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package hostassignment is the internal version of the API. +package hostassignment diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/register.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/register.go new file mode 100644 index 0000000000000..d43ac830c6152 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/register.go @@ -0,0 +1,31 @@ +package hostassignment + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupVersion is the group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "route.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &HostAssignmentAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/types.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/types.go new file mode 100644 index 0000000000000..05b11cf541cac --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/types.go @@ -0,0 +1,17 @@ +package hostassignment + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostAssignmentAdmissionConfig is the configuration for the route host assignment plugin. +type HostAssignmentAdmissionConfig struct { + metav1.TypeMeta + + // domain is used to generate a default host name for a route when the + // route's host name is empty. The generated host name will follow this + // pattern: "<route name>.<namespace>.<domain>". + Domain string +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/doc.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/doc.go new file mode 100644 index 0000000000000..07ffba69df66e --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/hostassignment + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/register.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/register.go new file mode 100644 index 0000000000000..4db9b98bd4f31 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/register.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName specifies the group name used to register the objects. +const GroupName = "route.openshift.io" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1"} + +// SchemeGroupVersion is the group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Deprecated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HostAssignmentAdmissionConfig{}, + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. + v1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/types.go b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/types.go new file mode 100644 index 0000000000000..0537567d18355 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/hostassignment/v1/types.go @@ -0,0 +1,17 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostAssignmentAdmissionConfig is the configuration for the route host assignment plugin. +type HostAssignmentAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // domain is used to generate a default host name for a route when the + // route's host name is empty. The generated host name will follow this + // pattern: "<route name>.<namespace>.<domain>". + Domain string `json:"domain"` +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go new file mode 100644 index 0000000000000..04727861a1ea1 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package ingressadmission is the internal version of the API.
+package ingressadmission diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go new file mode 100644 index 0000000000000..e0e84492781a6 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go @@ -0,0 +1,33 @@ +package ingressadmission + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "route.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &IngressAdmissionConfig{}, + ) + return nil +} + +func (obj *IngressAdmissionConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go new file mode 100644 index 0000000000000..bc1356398663c --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go @@ -0,0 +1,22 @@ +package ingressadmission + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressAdmissionConfig is the configuration for the the ingress +// controller limiter plugin. It changes the behavior of ingress +// objects to behave better with openshift routes and routers. +// *NOTE* This has security implications in the router when handling +// ingress objects +type IngressAdmissionConfig struct { + metav1.TypeMeta + + // AllowHostnameChanges when false or unset openshift does not + // allow changing or adding hostnames to ingress objects. If set + // to true then hostnames can be added or modified which has + // security implications in the router. 
+ AllowHostnameChanges bool +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go new file mode 100644 index 0000000000000..e105c48094abc --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go @@ -0,0 +1,59 @@ +package v1 + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { + scheme, codecs := apitesting.SchemeForOrDie(Install) + data, err := runtime.Encode(codecs.LegacyCodec(GroupVersion), obj) + if err != nil { + t.Errorf("%v\n %#v", err, obj) + return nil + } + obj2, err := runtime.Decode(codecs.UniversalDecoder(), data) + if err != nil { + t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) + return nil + } + obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) + err = scheme.Convert(obj2, obj3, nil) + if err != nil { + t.Errorf("%v\nSourceL %#v", err, obj2) + return nil + } + return obj3 +} + +func TestDefaults(t *testing.T) { + tests := []struct { + original *IngressAdmissionConfig + expected *IngressAdmissionConfig + }{ + { + original: &IngressAdmissionConfig{}, + expected: &IngressAdmissionConfig{ + AllowHostnameChanges: false, + }, + }, + } + for i, test := range tests { + t.Logf("test %d", i) + original := test.original + expected := test.expected + obj2 := roundTrip(t, runtime.Object(original)) + got, ok := obj2.(*IngressAdmissionConfig) + if !ok { + t.Errorf("unexpected object: %v", got) + t.FailNow() + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("got different than expected:\nA:\t%#v\nB:\t%#v\n\nDiff:\n%s\n\n%s", got, expected, diff.ObjectDiff(expected, got), diff.ObjectGoPrintSideBySide(expected, got)) + } + } +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go new file mode 100644 index 0000000000000..65269e693b22a --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission + +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go new file mode 100644 index 0000000000000..aecb8a6eec279 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go @@ -0,0 +1,27 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" +) + +func (obj *IngressAdmissionConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "route.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + ingressadmission.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &IngressAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go new file mode 100644 index 0000000000000..27266bc8b3f6d --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go @@ -0,0 +1,15 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go' This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_IngressAdmissionConfig = map[string]string{ + "": "IngressAdmissionConfig is the configuration for the the ingress controller limiter plugin. It changes the behavior of ingress objects to behave better with openshift routes and routers. *NOTE* This has security implications in the router when handling ingress objects", + "allowHostnameChanges": "AllowHostnameChanges when false or unset openshift does not allow changing or adding hostnames to ingress objects. If set to true then hostnames can be added or modified which has security implications in the router.", +} + +func (IngressAdmissionConfig) SwaggerDoc() map[string]string { + return map_IngressAdmissionConfig +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go new file mode 100644 index 0000000000000..a770d0539f449 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go @@ -0,0 +1,22 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressAdmissionConfig is the configuration for the the ingress +// controller limiter plugin. It changes the behavior of ingress +// objects to behave better with openshift routes and routers. +// *NOTE* This has security implications in the router when handling +// ingress objects +type IngressAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // AllowHostnameChanges when false or unset openshift does not + // allow changing or adding hostnames to ingress objects. If set + // to true then hostnames can be added or modified which has + // security implications in the router. 
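+ //
+ // A minimal plugin configuration file enabling hostname changes might look like
+ // this (illustrative only; the plugin also tolerates a nil config, which keeps
+ // the default of rejecting hostname changes):
+ //
+ //   apiVersion: route.openshift.io/v1
+ //   kind: IngressAdmissionConfig
+ //   allowHostnameChanges: true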
+ AllowHostnameChanges bool `json:"allowHostnameChanges"` +} diff --git a/openshift-kube-apiserver/admission/route/hostassignment/admission.go b/openshift-kube-apiserver/admission/route/hostassignment/admission.go new file mode 100644 index 0000000000000..f454bd22d5640 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/hostassignment/admission.go @@ -0,0 +1,167 @@ +package hostassignment + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/kubernetes" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + + routev1 "github.com/openshift/api/route/v1" + "github.com/openshift/library-go/pkg/config/helpers" + routecommon "github.com/openshift/library-go/pkg/route" + "github.com/openshift/library-go/pkg/route/hostassignment" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/route" + hostassignmentapi "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/hostassignment" + hostassignmentv1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/hostassignment/v1" +) + +const PluginName = "route.openshift.io/RouteHostAssignment" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + return newHostAssignment(pluginConfig) + }) +} + +type hostAssignment struct { + *admission.Handler + + hostnameGenerator hostassignment.HostnameGenerator + sarClient authorizationv1.SubjectAccessReviewInterface + validationOpts routecommon.RouteValidationOptions +} + +func readConfig(reader io.Reader) (*hostassignmentapi.HostAssignmentAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, hostassignmentapi.Install, hostassignmentv1.Install) + if err != nil { + return nil, err + } + if obj == nil { + scheme := runtime.NewScheme() + hostassignmentapi.Install(scheme) + hostassignmentv1.Install(scheme) + external := &hostassignmentv1.HostAssignmentAdmissionConfig{} + scheme.Default(external) + internal := &hostassignmentapi.HostAssignmentAdmissionConfig{} + if err := scheme.Convert(external, internal, nil); err != nil { + return nil, fmt.Errorf("failed to produce default config: %w", err) + } + obj = internal + } + config, ok := obj.(*hostassignmentapi.HostAssignmentAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + return config, nil +} + +func newHostAssignment(config *hostassignmentapi.HostAssignmentAdmissionConfig) (*hostAssignment, error) { + hostnameGenerator, err := hostassignment.NewSimpleAllocationPlugin(config.Domain) + if err != nil { + return nil, fmt.Errorf("configuration failed: %w", err) + } + return &hostAssignment{ + Handler: admission.NewHandler(admission.Create, admission.Update), + hostnameGenerator: hostnameGenerator, + }, nil +} + +func toRoute(uncastObj runtime.Object) (*routev1.Route, runtime.Unstructured, field.ErrorList) { + u, ok := uncastObj.(runtime.Unstructured) + if !ok { + return nil, nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Route"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{routev1.GroupVersion.String()}), + } + } + + var 
out routev1.Route + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.UnstructuredContent(), &out); err != nil { + return nil, nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Route"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{routev1.GroupVersion.String()}), + } + } + + return &out, u, nil +} + +var _ admission.MutationInterface = &hostAssignment{} + +func (a *hostAssignment) Admit(ctx context.Context, attributes admission.Attributes, o admission.ObjectInterfaces) error { + if attributes.GetResource().GroupResource() != (schema.GroupResource{Group: "route.openshift.io", Resource: "routes"}) { + return nil + } + // if a subresource is specified, skip it + if len(attributes.GetSubresource()) > 0 { + return nil + } + + switch attributes.GetOperation() { + case admission.Create: + r, u, errs := toRoute(attributes.GetObject()) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + errs = hostassignment.AllocateHost(ctx, r, a.sarClient, a.hostnameGenerator, a.validationOpts) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(r) + if err != nil { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), field.ErrorList{ + field.InternalError(field.NewPath(""), err), + }) + } + u.SetUnstructuredContent(content) + case admission.Update: + r, _, errs := toRoute(attributes.GetObject()) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + old, _, errs := toRoute(attributes.GetOldObject()) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + + errs = hostassignment.ValidateHostExternalCertificate(ctx, r, old, a.sarClient, a.validationOpts) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + + errs = hostassignment.ValidateHostUpdate(ctx, r, old, a.sarClient, a.validationOpts) + if len(errs) > 0 { + return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) + } + default: + return admission.NewForbidden(attributes, fmt.Errorf("unhandled operation: %v", attributes.GetOperation())) + } + + return nil +} + +var _ initializer.WantsExternalKubeClientSet = &hostAssignment{} + +func (a *hostAssignment) SetExternalKubeClientSet(clientset kubernetes.Interface) { + a.sarClient = clientset.AuthorizationV1().SubjectAccessReviews() + a.validationOpts = route.NewRouteValidationOpts().GetValidationOptions() +} + +func (a *hostAssignment) ValidateInitialization() error { + if a.sarClient == nil { + return fmt.Errorf("missing SubjectAccessReview client") + } + return nil +} diff --git a/openshift-kube-apiserver/admission/route/ingress_admission.go b/openshift-kube-apiserver/admission/route/ingress_admission.go new file mode 100644 index 0000000000000..f59104fe51a08 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/ingress_admission.go @@ -0,0 +1,162 @@ +// This plugin supplements upstream Ingress admission validation +// It takes care of current Openshift specific constraints on Ingress resources +package admission + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + 
"k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + kextensions "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/networking" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1" +) + +const ( + IngressAdmission = "route.openshift.io/IngressAdmission" +) + +func Register(plugins *admission.Plugins) { + plugins.Register(IngressAdmission, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + return NewIngressAdmission(pluginConfig), nil + }) +} + +type ingressAdmission struct { + *admission.Handler + config *ingressadmission.IngressAdmissionConfig + authorizer authorizer.Authorizer +} + +var _ = initializer.WantsAuthorizer(&ingressAdmission{}) +var _ = admission.ValidationInterface(&ingressAdmission{}) + +func NewIngressAdmission(config *ingressadmission.IngressAdmissionConfig) *ingressAdmission { + return &ingressAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + config: config, + } +} + +func readConfig(reader io.Reader) (*ingressadmission.IngressAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, ingressadmission.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*ingressadmission.IngressAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just list of strings + return config, nil +} + +func (r *ingressAdmission) SetAuthorizer(a authorizer.Authorizer) { + r.authorizer = a +} + +func (r *ingressAdmission) ValidateInitialization() error { + if r.authorizer == nil { + return fmt.Errorf("%s needs an Openshift Authorizer", IngressAdmission) + } + return nil +} + +func (r *ingressAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if a.GetResource().GroupResource() == kextensions.Resource("ingresses") { + switch a.GetOperation() { + case admission.Create: + if ingress, ok := a.GetObject().(*networking.Ingress); ok { + // if any rules have a host, check whether the user has permission to set them + for i, rule := range ingress.Spec.Rules { + if len(rule.Host) > 0 { + attr := authorizer.AttributesRecord{ + User: a.GetUserInfo(), + Verb: "create", + Namespace: a.GetNamespace(), + Resource: "routes", + Subresource: "custom-host", + APIGroup: "route.openshift.io", + ResourceRequest: true, + } + kind := schema.GroupKind{Group: a.GetResource().Group, Kind: a.GetResource().Resource} + authorized, _, err := r.authorizer.Authorize(ctx, attr) + if err != nil { + return errors.NewInvalid(kind, ingress.Name, field.ErrorList{field.InternalError(field.NewPath("spec", "rules").Index(i), err)}) + } + if authorized != authorizer.DecisionAllow { + return errors.NewInvalid(kind, ingress.Name, field.ErrorList{field.Forbidden(field.NewPath("spec", "rules").Index(i), "you do not have permission to set host fields in ingress rules")}) + } + break + } + } + } + case admission.Update: + if r.config == nil || r.config.AllowHostnameChanges == false { + oldIngress, ok := a.GetOldObject().(*networking.Ingress) + if !ok { 
+ return nil + } + newIngress, ok := a.GetObject().(*networking.Ingress) + if !ok { + return nil + } + if !haveHostnamesChanged(oldIngress, newIngress) { + attr := authorizer.AttributesRecord{ + User: a.GetUserInfo(), + Verb: "update", + Namespace: a.GetNamespace(), + Name: a.GetName(), + Resource: "routes", + Subresource: "custom-host", + APIGroup: "route.openshift.io", + ResourceRequest: true, + } + kind := schema.GroupKind{Group: a.GetResource().Group, Kind: a.GetResource().Resource} + authorized, _, err := r.authorizer.Authorize(ctx, attr) + if err != nil { + return errors.NewInvalid(kind, newIngress.Name, field.ErrorList{field.InternalError(field.NewPath("spec", "rules"), err)}) + } + if authorized == authorizer.DecisionAllow { + return nil + } + return fmt.Errorf("cannot change hostname") + } + } + } + } + return nil +} + +func haveHostnamesChanged(oldIngress, newIngress *networking.Ingress) bool { + hostnameSet := sets.NewString() + for _, element := range oldIngress.Spec.Rules { + hostnameSet.Insert(element.Host) + } + + for _, element := range newIngress.Spec.Rules { + if present := hostnameSet.Has(element.Host); !present { + return false + } + } + + return true +} diff --git a/openshift-kube-apiserver/admission/route/ingress_admission_test.go b/openshift-kube-apiserver/admission/route/ingress_admission_test.go new file mode 100644 index 0000000000000..b1013b8346a30 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/ingress_admission_test.go @@ -0,0 +1,171 @@ +package admission + +import ( + "context" + "testing" + + "k8s.io/kubernetes/pkg/apis/networking" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + kextensions "k8s.io/kubernetes/pkg/apis/extensions" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" +) + +type fakeAuthorizer struct { + allow authorizer.Decision + err error +} + +func (a *fakeAuthorizer) Authorize(context.Context, authorizer.Attributes) (authorizer.Decision, string, error) { + return a.allow, "", a.err +} + +func TestAdmission(t *testing.T) { + var newIngress *networking.Ingress + var oldIngress *networking.Ingress + + tests := []struct { + config *ingressadmission.IngressAdmissionConfig + testName string + oldHost, newHost string + op admission.Operation + admit bool + allow authorizer.Decision + }{ + { + admit: true, + config: emptyConfig(), + op: admission.Create, + testName: "No errors on create", + }, + { + admit: true, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "foo.com", + testName: "keeping the host the same should pass", + }, + { + admit: true, + config: emptyConfig(), + op: admission.Update, + oldHost: "foo.com", + testName: "deleting a hostname should pass", + }, + { + admit: false, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "changing hostname should fail", + }, + { + admit: true, + allow: authorizer.DecisionAllow, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "changing hostname should succeed if the user has permission", + }, + { + admit: false, + config: nil, + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "unconfigured plugin should still fail", + }, + { + admit: true, + config: testConfigUpdateAllow(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "Upstream Hostname updates enabled", + }, + 
{ + admit: true, + config: testConfigUpdateAllow(), + op: admission.Update, + newHost: "foo.com", + testName: "add new hostname with upstream rules", + }, + { + admit: false, + allow: authorizer.DecisionNoOpinion, + config: emptyConfig(), + op: admission.Create, + newHost: "foo.com", + testName: "setting the host should require permission", + }, + { + admit: true, + allow: authorizer.DecisionAllow, + config: emptyConfig(), + op: admission.Create, + newHost: "foo.com", + testName: "setting the host should pass if user has permission", + }, + } + for _, test := range tests { + if len(test.newHost) > 0 { + newIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: test.newHost, + }, + }, + }, + } + } else { + //Used to test deleting a hostname + newIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + } + } + handler := NewIngressAdmission(test.config) + handler.SetAuthorizer(&fakeAuthorizer{allow: test.allow}) + + if len(test.oldHost) > 0 { + //Provides the previous state of an ingress object + oldIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: test.oldHost, + }, + }, + }, + } + } else { + oldIngress = nil + } + + err := handler.Validate(context.TODO(), admission.NewAttributesRecord(newIngress, oldIngress, kextensions.Kind("ingresses").WithVersion("Version"), "namespace", newIngress.ObjectMeta.Name, kextensions.Resource("ingresses").WithVersion("version"), "", test.op, nil, false, nil), nil) + if test.admit && err != nil { + t.Errorf("%s: expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("%s: expected an error", test.testName) + } + } + +} + +func emptyConfig() *ingressadmission.IngressAdmissionConfig { + return &ingressadmission.IngressAdmissionConfig{} +} + +func testConfigUpdateAllow() *ingressadmission.IngressAdmissionConfig { + return &ingressadmission.IngressAdmissionConfig{ + AllowHostnameChanges: true, + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go new file mode 100644 index 0000000000000..ae163f472d40a --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package api is the internal version of the API. 
+package podnodeconstraints diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go new file mode 100644 index 0000000000000..5b8add00bb815 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go @@ -0,0 +1,33 @@ +package podnodeconstraints + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "scheduling.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &PodNodeConstraintsConfig{}, + ) + return nil +} + +func (obj *PodNodeConstraintsConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go new file mode 100644 index 0000000000000..27cebad199ed0 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go @@ -0,0 +1,19 @@ +package podnodeconstraints + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodNodeConstraintsConfig is the configuration for the pod node name +// and node selector constraint plug-in. 
For accounts, serviceaccounts, +// and groups which lack the "pods/binding" permission, Loading this +// plugin will prevent setting NodeName on pod specs and will prevent +// setting NodeSelectors whose labels appear in the blacklist field +// "NodeSelectorLabelBlacklist" +type PodNodeConstraintsConfig struct { + metav1.TypeMeta + // NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the "pods/binding" permission + NodeSelectorLabelBlacklist []string +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go new file mode 100644 index 0000000000000..54d718cfc91af --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go @@ -0,0 +1,19 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func SetDefaults_PodNodeConstraintsConfig(obj *PodNodeConstraintsConfig) { + if obj.NodeSelectorLabelBlacklist == nil { + obj.NodeSelectorLabelBlacklist = []string{ + corev1.LabelHostname, + } + } +} + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&PodNodeConstraintsConfig{}, func(obj interface{}) { SetDefaults_PodNodeConstraintsConfig(obj.(*PodNodeConstraintsConfig)) }) + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go new file mode 100644 index 0000000000000..513084ad95122 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go @@ -0,0 +1,59 @@ +package v1 + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { + scheme, codecs := apitesting.SchemeForOrDie(Install) + data, err := runtime.Encode(codecs.LegacyCodec(GroupVersion), obj) + if err != nil { + t.Errorf("%v\n %#v", err, obj) + return nil + } + obj2, err := runtime.Decode(codecs.UniversalDecoder(), data) + if err != nil { + t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) + return nil + } + obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) + err = scheme.Convert(obj2, obj3, nil) + if err != nil { + t.Errorf("%v\nSource: %#v", err, obj2) + return nil + } + return obj3 +} + +func TestDefaults(t *testing.T) { + tests := []struct { + original *PodNodeConstraintsConfig + expected *PodNodeConstraintsConfig + }{ + { + original: &PodNodeConstraintsConfig{}, + expected: &PodNodeConstraintsConfig{ + NodeSelectorLabelBlacklist: []string{"kubernetes.io/hostname"}, + }, + }, + } + for i, test := range tests { + t.Logf("test %d", i) + original := test.original + expected := test.expected + obj2 := roundTrip(t, runtime.Object(original)) + got, ok := obj2.(*PodNodeConstraintsConfig) + if !ok { + t.Errorf("unexpected object: %v", got) + t.FailNow() + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("got different than expected:\nA:\t%#v\nB:\t%#v\n\nDiff:\n%s\n\n%s", got, expected, diff.ObjectDiff(expected, got), diff.ObjectGoPrintSideBySide(expected, got)) + } + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go new file 
mode 100644 index 0000000000000..602ddf4d19a41 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go new file mode 100644 index 0000000000000..b836b750fdb3f --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go @@ -0,0 +1,28 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" +) + +func (obj *PodNodeConstraintsConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "scheduling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + podnodeconstraints.Install, + + addDefaultingFuncs, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &PodNodeConstraintsConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go new file mode 100644 index 0000000000000..95e3d2220841c --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go @@ -0,0 +1,15 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go' This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_PodNodeConstraintsConfig = map[string]string{ + "": "PodNodeConstraintsConfig is the configuration for the pod node name and node selector constraint plug-in. For accounts, serviceaccounts and groups which lack the \"pods/binding\" permission, Loading this plugin will prevent setting NodeName on pod specs and will prevent setting NodeSelectors whose labels appear in the blacklist field \"NodeSelectorLabelBlacklist\"", + "nodeSelectorLabelBlacklist": "NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the \"pods/binding\" permission", +} + +func (PodNodeConstraintsConfig) SwaggerDoc() map[string]string { + return map_PodNodeConstraintsConfig +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go new file mode 100644 index 0000000000000..3ffd5acdb8952 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go @@ -0,0 +1,20 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodNodeConstraintsConfig is the configuration for the pod node name +// and node selector constraint plug-in. 
For accounts, serviceaccounts +// and groups which lack the "pods/binding" permission, Loading this +// plugin will prevent setting NodeName on pod specs and will prevent +// setting NodeSelectors whose labels appear in the blacklist field +// "NodeSelectorLabelBlacklist" +type PodNodeConstraintsConfig struct { + metav1.TypeMeta `json:",inline"` + + // NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the "pods/binding" permission + NodeSelectorLabelBlacklist []string `json:"nodeSelectorLabelBlacklist" description:"list of labels which cannot be set by entities without the 'pods/binding' permission"` +} diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go new file mode 100644 index 0000000000000..b52b8242550cc --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go @@ -0,0 +1,174 @@ +package nodeenv + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + corev1listers "k8s.io/client-go/listers/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + + projectv1 "github.com/openshift/api/project/v1" + "github.com/openshift/apiserver-library-go/pkg/labelselector" +) + +func Register(plugins *admission.Plugins) { + plugins.Register("scheduling.openshift.io/OriginPodNodeEnvironment", + func(config io.Reader) (admission.Interface, error) { + return NewPodNodeEnvironment() + }) +} + +const ( + timeToWaitForCacheSync = 10 * time.Second + kubeProjectNodeSelector = "scheduler.alpha.kubernetes.io/node-selector" +) + +// podNodeEnvironment is an implementation of admission.MutationInterface. +type podNodeEnvironment struct { + *admission.Handler + nsLister corev1listers.NamespaceLister + nsListerSynced func() bool + nodeLister corev1listers.NodeLister + nodeListerSynced func() bool + // TODO this should become a piece of config passed to the admission plugin + defaultNodeSelector string +} + +var _ = initializer.WantsExternalKubeInformerFactory(&podNodeEnvironment{}) +var _ = WantsDefaultNodeSelector(&podNodeEnvironment{}) +var _ = admission.ValidationInterface(&podNodeEnvironment{}) +var _ = admission.MutationInterface(&podNodeEnvironment{}) + +// Admit enforces that pod and its project node label selectors matches at least a node in the cluster. +func (p *podNodeEnvironment) admit(ctx context.Context, a admission.Attributes, mutationAllowed bool) (err error) { + resource := a.GetResource().GroupResource() + if resource != corev1.Resource("pods") { + return nil + } + if a.GetSubresource() != "" { + // only run the checks below on pods proper and not subresources + return nil + } + + obj := a.GetObject() + pod, ok := obj.(*coreapi.Pod) + if !ok { + return nil + } + + name := pod.Name + + if !p.waitForSyncedStore(time.After(timeToWaitForCacheSync)) { + return admission.NewForbidden(a, errors.New("scheduling.openshift.io/OriginPodNodeEnvironment: caches not synchronized")) + } + namespace, err := p.nsLister.Get(a.GetNamespace()) + if err != nil { + return apierrors.NewForbidden(resource, name, err) + } + + // If scheduler.alpha.kubernetes.io/node-selector is set on the namespace, + // do not process the pod further. 
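+ // The upstream annotation takes precedence over both the project node selector annotation and the cluster-wide default selector, so the pod is admitted here without any selector being merged into its spec.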
+ if _, ok := namespace.ObjectMeta.Annotations[kubeProjectNodeSelector]; ok { + return nil + } + + selector := p.defaultNodeSelector + if projectNodeSelector, ok := namespace.ObjectMeta.Annotations[projectv1.ProjectNodeSelector]; ok { + selector = projectNodeSelector + } + // we might consider in the future to allow advanced syntax selectors and use labels.Parse here instead + projectNodeSelector, err := labelselector.Parse(selector) + if err != nil { + return err + } + + if labelselector.Conflicts(projectNodeSelector, pod.Spec.NodeSelector) { + return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its project node label selector")) + } + + if !mutationAllowed && len(labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector)) != len(pod.Spec.NodeSelector) { + // no conflict, different size => pod.Spec.NodeSelector does not contain projectNodeSelector + return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector does not extend project node label selector")) + } + + if len(pod.Spec.NodeName) > 0 && len(projectNodeSelector) > 0 { + node, err := p.nodeLister.Get(pod.Spec.NodeName) + if err != nil { + return apierrors.NewForbidden(resource, name, fmt.Errorf("cannot validate project node label selector: %v", err)) + } + projectNodeSelectorAdvanced, err := labels.Parse(selector) + if err != nil { + return err + } + if !projectNodeSelectorAdvanced.Matches(labels.Set(node.Labels)) { + return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node name conflicts with project node label selector")) + } + } + + // modify pod node selector = project node selector + current pod node selector + pod.Spec.NodeSelector = labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector) + + return nil +} + +func (p *podNodeEnvironment) Admit(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + return p.admit(ctx, a, true) +} + +func (p *podNodeEnvironment) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + return p.admit(ctx, a, false) +} + +func (p *podNodeEnvironment) SetDefaultNodeSelector(in string) { + p.defaultNodeSelector = in +} + +func (p *podNodeEnvironment) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + p.nsLister = kubeInformers.Core().V1().Namespaces().Lister() + p.nsListerSynced = kubeInformers.Core().V1().Namespaces().Informer().HasSynced + p.nodeLister = kubeInformers.Core().V1().Nodes().Lister() + p.nodeListerSynced = kubeInformers.Core().V1().Nodes().Informer().HasSynced +} + +func (p *podNodeEnvironment) waitForSyncedStore(timeout <-chan time.Time) bool { + for !p.nsListerSynced() || !p.nodeListerSynced() { + select { + case <-time.After(100 * time.Millisecond): + case <-timeout: + return p.nsListerSynced() && p.nodeListerSynced() + } + } + + return true +} + +func (p *podNodeEnvironment) ValidateInitialization() error { + if p.nsLister == nil { + return fmt.Errorf("project node environment plugin needs a namespace lister") + } + if p.nsListerSynced == nil { + return fmt.Errorf("project node environment plugin needs a namespace lister synced") + } + if p.nodeLister == nil { + return fmt.Errorf("project node environment plugin needs a node lister") + } + if p.nodeListerSynced == nil { + return fmt.Errorf("project node environment plugin needs a node lister synced") + } + return nil +} + +func NewPodNodeEnvironment() (admission.Interface, error) { + return &podNodeEnvironment{ + Handler: 
admission.NewHandler(admission.Create), + }, nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go new file mode 100644 index 0000000000000..b3d058ba34a2f --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go @@ -0,0 +1,211 @@ +package nodeenv + +import ( + "context" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + kapi "k8s.io/kubernetes/pkg/apis/core" + + projectv1 "github.com/openshift/api/project/v1" + "github.com/openshift/apiserver-library-go/pkg/labelselector" +) + +// TestPodAdmission verifies various scenarios involving pod/project/global node label selectors +func TestPodAdmission(t *testing.T) { + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testProject", + Namespace: "", + }, + } + + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "worker-1", + Namespace: "", + Labels: map[string]string{ + "worker": "true", + }, + }, + } + + handler := &podNodeEnvironment{} + pod := &kapi.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "testPod"}, + } + + tests := []struct { + defaultNodeSelector string + projectNodeSelector string + podNodeSelector map[string]string + podNodeName string + mergedNodeSelector map[string]string + ignoreProjectNodeSelector bool + admit bool + testName string + }{ + { + defaultNodeSelector: "", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{}, + ignoreProjectNodeSelector: true, + admit: true, + testName: "No node selectors", + }, + { + defaultNodeSelector: "infra = false", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "false"}, + ignoreProjectNodeSelector: true, + admit: true, + testName: "Default node selector and no conflicts", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "infra = false", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "false"}, + admit: true, + testName: "Project node selector and no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{}, + admit: true, + testName: "Empty project node selector and no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "true"}, + admit: true, + testName: "Default and project node selector, no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{"env": "test"}, + mergedNodeSelector: map[string]string{"infra": "true", "env": "test"}, + admit: true, + testName: "Project and pod node selector, no conflicts", + }, + { + defaultNodeSelector: "env = test", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{"infra": "false"}, + mergedNodeSelector: map[string]string{"infra": "false"}, + admit: false, + testName: "Conflicting pod and project node selector, one label", + }, + { + defaultNodeSelector: "env=dev", + projectNodeSelector: "infra=false, env = test", + podNodeSelector: map[string]string{"env": "dev", "color": "blue"}, + mergedNodeSelector: map[string]string{"env": "dev", "color": 
"blue"}, + admit: false, + testName: "Conflicting pod and project node selector, multiple labels", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "worker=true", + podNodeName: "worker-1", + podNodeSelector: nil, + mergedNodeSelector: map[string]string{"worker": "true"}, + admit: true, + testName: "node referenced in pod.nodeName does not conflict with project node selector", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "", + podNodeName: "worker-1", + podNodeSelector: map[string]string{"worker": "false"}, + mergedNodeSelector: map[string]string{"worker": "false"}, + admit: true, + // default to kube behavior: let this fail by kubelet + testName: "node referenced in pod spec.nodeName can conflict with its own node selector when no project node selector is specified", + }, + { + defaultNodeSelector: "worker = true", + projectNodeSelector: "worker=false", + podNodeName: "worker-1", + podNodeSelector: nil, + mergedNodeSelector: nil, + admit: false, + testName: "node referenced in pod spec.nodeName conflicts with project node selector", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "worker=true", + podNodeName: "worker-2", + podNodeSelector: nil, + mergedNodeSelector: nil, + admit: false, + testName: "missing node referenced in pod spec.nodeName does not admit", + }, + } + for _, test := range tests { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(namespace) + indexer.Add(node) + handler.nsLister = corev1listers.NewNamespaceLister(indexer) + handler.nsListerSynced = func() bool { return true } + handler.nodeLister = corev1listers.NewNodeLister(indexer) + handler.nodeListerSynced = func() bool { return true } + handler.defaultNodeSelector = test.defaultNodeSelector + + if !test.ignoreProjectNodeSelector { + namespace.ObjectMeta.Annotations = map[string]string{projectv1.ProjectNodeSelector: test.projectNodeSelector} + } + pod.Spec = kapi.PodSpec{NodeSelector: test.podNodeSelector, NodeName: test.podNodeName} + + attrs := admission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "testProject", namespace.ObjectMeta.Name, kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, nil) + err := handler.Admit(context.TODO(), attrs, nil) + if test.admit && err != nil { + t.Errorf("Test: %s, expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("Test: %s, expected an error", test.testName) + } else if err == nil { + if err := handler.Validate(context.TODO(), attrs, nil); err != nil { + t.Errorf("Test: %s, unexpected Validate error after Admit succeeded: %v", test.testName, err) + } + } + + if !labelselector.Equals(test.mergedNodeSelector, pod.Spec.NodeSelector) { + t.Errorf("Test: %s, expected: %s but got: %s", test.testName, test.mergedNodeSelector, pod.Spec.NodeSelector) + } else if len(test.projectNodeSelector) > 0 { + firstProjectKey := strings.TrimSpace(strings.Split(test.projectNodeSelector, "=")[0]) + delete(pod.Spec.NodeSelector, firstProjectKey) + if err := handler.Validate(context.TODO(), attrs, nil); err == nil { + t.Errorf("Test: %s, expected Validate error after removing project key %q", test.testName, firstProjectKey) + } + } + } +} + +func TestHandles(t *testing.T) { + for op, shouldHandle := range map[admission.Operation]bool{ + admission.Create: true, + admission.Update: false, + admission.Connect: false, + admission.Delete: false, + } { + nodeEnvionment, err := NewPodNodeEnvironment() + if err != nil { + 
t.Errorf("%v: error getting node environment: %v", op, err) + continue + } + + if e, a := shouldHandle, nodeEnvionment.Handles(op); e != a { + t.Errorf("%v: shouldHandle=%t, handles=%t", op, e, a) + } + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go new file mode 100644 index 0000000000000..534905cb06120 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go @@ -0,0 +1,28 @@ +package nodeenv + +import ( + "k8s.io/apiserver/pkg/admission" +) + +func NewInitializer(defaultNodeSelector string) admission.PluginInitializer { + return &localInitializer{ + defaultNodeSelector: defaultNodeSelector, + } +} + +type WantsDefaultNodeSelector interface { + SetDefaultNodeSelector(string) + admission.InitializationValidator +} + +type localInitializer struct { + defaultNodeSelector string +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsDefaultNodeSelector); ok { + wants.SetDefaultNodeSelector(i.defaultNodeSelector) + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go new file mode 100644 index 0000000000000..05ef26277fcac --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go @@ -0,0 +1,205 @@ +package podnodeconstraints + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/auth/nodeidentifier" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1" +) + +const PluginName = "scheduling.openshift.io/PodNodeConstraints" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", PluginName) + return nil, nil + } + return NewPodNodeConstraints(pluginConfig, nodeidentifier.NewDefaultNodeIdentifier()), nil + }) +} + +// NewPodNodeConstraints creates a new admission plugin to prevent objects that contain pod templates +// from containing node bindings by name or selector based on role permissions. +func NewPodNodeConstraints(config *podnodeconstraints.PodNodeConstraintsConfig, nodeIdentifier nodeidentifier.NodeIdentifier) admission.Interface { + plugin := podNodeConstraints{ + config: config, + Handler: admission.NewHandler(admission.Create, admission.Update), + nodeIdentifier: nodeIdentifier, + } + if config != nil { + plugin.selectorLabelBlacklist = sets.NewString(config.NodeSelectorLabelBlacklist...) 
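+ // (a nil config disables the plugin entirely: Register returns no plugin and Validate short-circuits, so the blacklist set is only built when configuration was supplied)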
+ } + + return &plugin +} + +type podNodeConstraints struct { + *admission.Handler + selectorLabelBlacklist sets.String + config *podnodeconstraints.PodNodeConstraintsConfig + authorizer authorizer.Authorizer + nodeIdentifier nodeidentifier.NodeIdentifier +} + +var _ = initializer.WantsAuthorizer(&podNodeConstraints{}) +var _ = admission.ValidationInterface(&podNodeConstraints{}) + +func shouldCheckResource(resource schema.GroupResource, kind schema.GroupKind) (bool, error) { + expectedKind, shouldCheck := resourcesToCheck[resource] + if !shouldCheck { + return false, nil + } + if expectedKind != kind { + return false, fmt.Errorf("Unexpected resource kind %v for resource %v", &kind, &resource) + } + return true, nil +} + +// resourcesToCheck is a map of resources and corresponding kinds of things that we want handled in this plugin +var resourcesToCheck = map[schema.GroupResource]schema.GroupKind{ + coreapi.Resource("pods"): coreapi.Kind("Pod"), +} + +func readConfig(reader io.Reader) (*podnodeconstraints.PodNodeConstraintsConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, podnodeconstraints.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*podnodeconstraints.PodNodeConstraintsConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just list of strings + return config, nil +} + +func (o *podNodeConstraints) Validate(ctx context.Context, attr admission.Attributes, _ admission.ObjectInterfaces) error { + switch { + case o.config == nil, + attr.GetSubresource() != "": + return nil + } + shouldCheck, err := shouldCheckResource(attr.GetResource().GroupResource(), attr.GetKind().GroupKind()) + if err != nil { + return err + } + if !shouldCheck { + return nil + } + // Only check Create operation on pods + if attr.GetResource().GroupResource() == coreapi.Resource("pods") && attr.GetOperation() != admission.Create { + return nil + } + + return o.validatePodSpec(ctx, attr, attr.GetObject().(*coreapi.Pod).Spec) +} + +// validate PodSpec if NodeName or NodeSelector are specified +func (o *podNodeConstraints) validatePodSpec(ctx context.Context, attr admission.Attributes, ps coreapi.PodSpec) error { + // a node creating a mirror pod that targets itself is allowed + // see the NodeRestriction plugin for further details + if o.isNodeSelfTargetWithMirrorPod(attr, ps.NodeName) { + return nil + } + + matchingLabels := []string{} + // nodeSelector blacklist filter + for nodeSelectorLabel := range ps.NodeSelector { + if o.selectorLabelBlacklist.Has(nodeSelectorLabel) { + matchingLabels = append(matchingLabels, nodeSelectorLabel) + } + } + // nodeName constraint + if len(ps.NodeName) > 0 || len(matchingLabels) > 0 { + allow, err := o.checkPodsBindAccess(ctx, attr) + if err != nil { + return err + } + if !allow { + switch { + case len(ps.NodeName) > 0 && len(matchingLabels) == 0: + return admission.NewForbidden(attr, fmt.Errorf("node selection by nodeName is prohibited by policy for your role")) + case len(ps.NodeName) == 0 && len(matchingLabels) > 0: + return admission.NewForbidden(attr, fmt.Errorf("node selection by label(s) %v is prohibited by policy for your role", matchingLabels)) + case len(ps.NodeName) > 0 && len(matchingLabels) > 0: + return admission.NewForbidden(attr, fmt.Errorf("node selection by nodeName and label(s) %v is prohibited by policy for your role", matchingLabels)) + } + } + } + return nil +} + +func (o *podNodeConstraints) 
SetAuthorizer(a authorizer.Authorizer) { + o.authorizer = a +} + +func (o *podNodeConstraints) ValidateInitialization() error { + if o.authorizer == nil { + return fmt.Errorf("%s requires an authorizer", PluginName) + } + if o.nodeIdentifier == nil { + return fmt.Errorf("%s requires a node identifier", PluginName) + } + return nil +} + +// build LocalSubjectAccessReview struct to validate role via checkAccess +func (o *podNodeConstraints) checkPodsBindAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Namespace: attr.GetNamespace(), + Resource: "pods", + Subresource: "binding", + APIGroup: coreapi.GroupName, + ResourceRequest: true, + } + if attr.GetResource().GroupResource() == coreapi.Resource("pods") { + authzAttr.Name = attr.GetName() + } + authorized, _, err := o.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} + +func (o *podNodeConstraints) isNodeSelfTargetWithMirrorPod(attr admission.Attributes, nodeName string) bool { + // make sure we are actually trying to target a node + if len(nodeName) == 0 { + return false + } + // this check specifically requires the object to be pod (unlike the other checks where we want any pod spec) + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return false + } + // note that anyone can create a mirror pod, but they are not privileged in any way + // they are actually highly constrained since they cannot reference secrets + // nodes can only create and delete them, and they will delete any "orphaned" mirror pods + if _, isMirrorPod := pod.Annotations[coreapi.MirrorPodAnnotationKey]; !isMirrorPod { + return false + } + // we are targeting a node with a mirror pod + // confirm the user is a node that is targeting itself + actualNodeName, isNode := o.nodeIdentifier.NodeIdentity(attr.GetUserInfo()) + return isNode && actualNodeName == nodeName +} diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go new file mode 100644 index 0000000000000..a5587c5d0ee88 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go @@ -0,0 +1,283 @@ +package podnodeconstraints + +import ( + "bytes" + "context" + "fmt" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/auth/nodeidentifier" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" +) + +func TestPodNodeConstraints(t *testing.T) { + ns := metav1.NamespaceDefault + tests := []struct { + config *podnodeconstraints.PodNodeConstraintsConfig + resource runtime.Object + kind schema.GroupKind + groupresource schema.GroupResource + userinfo user.Info + reviewResponse *authorizationv1.SubjectAccessReviewResponse + expectedResource string + expectedErrorMsg string + }{ + // 0: expect unspecified defaults to not error + { + config: emptyConfig(), + resource: defaultPod(), + userinfo: serviceaccount.UserInfo("", "", ""), 
+ reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 1: expect nodeSelector to error with user which lacks "pods/binding" access + { + config: testConfig(), + resource: nodeSelectorPod(), + userinfo: serviceaccount.UserInfo("", "", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by label(s) [bogus] is prohibited by policy for your role", + }, + // 2: expect nodeName to fail with user that lacks "pods/binding" access + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: serviceaccount.UserInfo("herpy", "derpy", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 3: expect nodeName and nodeSelector to fail with user that lacks "pods/binding" access + { + config: testConfig(), + resource: nodeNameNodeSelectorPod(), + userinfo: serviceaccount.UserInfo("herpy", "derpy", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by nodeName and label(s) [bogus] is prohibited by policy for your role", + }, + // 4: expect nodeSelector to succeed with user that has "pods/binding" access + { + config: testConfig(), + resource: nodeSelectorPod(), + userinfo: serviceaccount.UserInfo("openshift-infra", "daemonset-controller", ""), + reviewResponse: reviewResponse(true, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 5: expect nodeName to succeed with user that has "pods/binding" access + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: serviceaccount.UserInfo("openshift-infra", "daemonset-controller", ""), + reviewResponse: reviewResponse(true, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 6: expect nil config to bypass admission + { + config: nil, + resource: defaultPod(), + userinfo: serviceaccount.UserInfo("", "", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 7: expect nodeName to succeed with node user self targeting mirror pod + { + config: testConfig(), + resource: nodeNameMirrorPod(), + userinfo: &user.DefaultInfo{Name: "system:node:frank", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "", + }, + // 8: expect nodeName to fail with node user self targeting non-mirror pod + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: &user.DefaultInfo{Name: "system:node:frank", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 9: expect nodeName to fail with node user non-self targeting mirror pod + { + config: testConfig(), + resource: nodeNameMirrorPod(), + userinfo: &user.DefaultInfo{Name: "system:node:bob", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 10: expect nodeName to fail with node user non-self targeting non-mirror pod + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: &user.DefaultInfo{Name: "system:node:bob", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + } + for i, tc := range tests { + var expectedError error + errPrefix := fmt.Sprintf("%d", i) + prc := NewPodNodeConstraints(tc.config, 
nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + continue + } + attrs := admission.NewAttributesRecord(tc.resource, nil, kapi.Kind("Pod").WithVersion("version"), ns, "test", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, tc.userinfo) + if tc.expectedErrorMsg != "" { + expectedError = admission.NewForbidden(attrs, fmt.Errorf(tc.expectedErrorMsg)) + } + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) + } +} + +func TestPodNodeConstraintsPodUpdate(t *testing.T) { + ns := metav1.NamespaceDefault + var expectedError error + errPrefix := "PodUpdate" + prc := NewPodNodeConstraints(testConfig(), nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + return + } + attrs := admission.NewAttributesRecord(nodeNamePod(), nodeNamePod(), kapi.Kind("Pod").WithVersion("version"), ns, "test", kapi.Resource("pods").WithVersion("version"), "", admission.Update, nil, false, serviceaccount.UserInfo("", "", "")) + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) +} + +func TestPodNodeConstraintsNonHandledResources(t *testing.T) { + ns := metav1.NamespaceDefault + errPrefix := "ResourceQuotaTest" + var expectedError error + prc := NewPodNodeConstraints(testConfig(), nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + return + } + attrs := admission.NewAttributesRecord(resourceQuota(), nil, kapi.Kind("ResourceQuota").WithVersion("version"), ns, "test", kapi.Resource("resourcequotas").WithVersion("version"), "", admission.Create, nil, false, serviceaccount.UserInfo("", "", "")) + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) +} + +func emptyConfig() *podnodeconstraints.PodNodeConstraintsConfig { + return &podnodeconstraints.PodNodeConstraintsConfig{} +} + +func testConfig() *podnodeconstraints.PodNodeConstraintsConfig { + return &podnodeconstraints.PodNodeConstraintsConfig{ + NodeSelectorLabelBlacklist: []string{"bogus"}, + } +} + +func defaultPod() *kapi.Pod { + pod := &kapi.Pod{} + return pod +} + +func nodeNameNodeSelectorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeName = "frank" + pod.Spec.NodeSelector = map[string]string{"bogus": "frank"} + return pod +} + +func nodeNamePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeName = "frank" + return pod +} + +func nodeNameMirrorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Annotations = map[string]string{kapi.MirrorPodAnnotationKey: "true"} + pod.Spec.NodeName = "frank" + return pod +} + +func nodeSelectorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeSelector = map[string]string{"bogus": "frank"} + return pod +} + +func resourceQuota() runtime.Object { + rq := &kapi.ResourceQuota{} + return rq +} + +func checkAdmitError(t *testing.T, err error, expectedError 
error, prefix string) { + switch { + case expectedError == nil && err == nil: + // continue + case expectedError != nil && err != nil && err.Error() != expectedError.Error(): + t.Errorf("%s: expected error %q, got: %q", prefix, expectedError.Error(), err.Error()) + case expectedError == nil && err != nil: + t.Errorf("%s: expected no error, got: %q", prefix, err.Error()) + case expectedError != nil && err == nil: + t.Errorf("%s: expected error %q, no error received", prefix, expectedError.Error()) + } +} + +type fakeTestAuthorizer struct { + t *testing.T +} + +func fakeAuthorizer(t *testing.T) authorizer.Authorizer { + return &fakeTestAuthorizer{ + t: t, + } +} + +func (a *fakeTestAuthorizer) Authorize(_ context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + ui := attributes.GetUser() + if ui == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("No valid UserInfo for Context") + } + // User with pods/bindings. permission: + if ui.GetName() == "system:serviceaccount:openshift-infra:daemonset-controller" { + return authorizer.DecisionAllow, "", nil + } + // User without pods/bindings. permission: + return authorizer.DecisionNoOpinion, "", nil +} + +func reviewResponse(allowed bool, msg string) *authorizationv1.SubjectAccessReviewResponse { + return &authorizationv1.SubjectAccessReviewResponse{ + Allowed: allowed, + Reason: msg, + } +} + +func TestReadConfig(t *testing.T) { + configStr := `apiVersion: scheduling.openshift.io/v1 +kind: PodNodeConstraintsConfig +nodeSelectorLabelBlacklist: + - bogus + - foo +` + buf := bytes.NewBufferString(configStr) + config, err := readConfig(buf) + if err != nil { + t.Fatalf("unexpected error reading config: %v", err) + } + if len(config.NodeSelectorLabelBlacklist) == 0 { + t.Fatalf("NodeSelectorLabelBlacklist didn't take specified value") + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go new file mode 100644 index 0000000000000..dfdf50a8102f0 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go @@ -0,0 +1,44 @@ +/* +Package podnodeconstraints contains the PodNodeConstraints admission +control plugin. This plugin allows administrators to set policy +governing the use of the NodeName and NodeSelector attributes in pod +specs. + +Enabling this plugin will prevent the use of the NodeName field in Pod +templates for users and serviceaccounts which lack the "pods/binding" +permission, and which don't belong to groups which have the +"pods/binding" permission. + +This plugin will also prevent users, serviceaccounts and groups which +lack the "pods/binding" permission from specifying the NodeSelector field +in Pod templates for labels which appear in the +nodeSelectorLabelBlacklist list field. + +Configuration + +The plugin is configured via a PodNodeConstraintsConfig object in the +origin and kubernetes Master configs: + +admissionConfig: + pluginConfig: + PodNodeConstraints: + configuration: + apiVersion: v1 + kind: PodNodeConstraintsConfig + nodeSelectorLabelBlacklist: + - label1 + - label2 +... 
+kubernetesMasterConfig: + admissionConfig: + pluginConfig: + PodNodeConstraints: + configuration: + apiVersion: v1 + kind: PodNodeConstraintsConfig + nodeSelectorLabelBlacklist: + - label1 + - label2 +*/ + +package podnodeconstraints diff --git a/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission.go b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission.go new file mode 100644 index 0000000000000..35e249acc3602 --- /dev/null +++ b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission.go @@ -0,0 +1,281 @@ +package csiinlinevolumesecurity + +import ( + "context" + "fmt" + "io" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/warning" + "k8s.io/client-go/informers" + corev1listers "k8s.io/client-go/listers/core/v1" + storagev1listers "k8s.io/client-go/listers/storage/v1" + "k8s.io/klog/v2" + appsapi "k8s.io/kubernetes/pkg/apis/apps" + batchapi "k8s.io/kubernetes/pkg/apis/batch" + coreapi "k8s.io/kubernetes/pkg/apis/core" + podsecapi "k8s.io/pod-security-admission/api" +) + +const ( + // Plugin name + PluginName = "storage.openshift.io/CSIInlineVolumeSecurity" + // Label on the CSIDriver to declare the driver's effective pod security profile + csiInlineVolProfileLabel = "security.openshift.io/csi-ephemeral-volume-profile" + // Default values for the profile labels when no such label exists + defaultCSIInlineVolProfile = podsecapi.LevelPrivileged + defaultPodSecEnforceProfile = podsecapi.LevelRestricted + defaultPodSecWarnProfile = podsecapi.LevelRestricted + defaultPodSecAuditProfile = podsecapi.LevelRestricted + // Format string used for audit/warn/enforce response messages + admissionResponseFormatStr = "%s uses an inline volume provided by CSIDriver %s and namespace %s has a pod security %s level that is lower than %s" +) + +var ( + podSpecResources = map[schema.GroupResource]bool{ + coreapi.Resource("pods"): true, + coreapi.Resource("replicationcontrollers"): true, + coreapi.Resource("podtemplates"): true, + appsapi.Resource("replicasets"): true, + appsapi.Resource("deployments"): true, + appsapi.Resource("statefulsets"): true, + appsapi.Resource("daemonsets"): true, + batchapi.Resource("jobs"): true, + batchapi.Resource("cronjobs"): true, + } +) + +var _ = initializer.WantsExternalKubeInformerFactory(&csiInlineVolSec{}) +var _ = admission.ValidationInterface(&csiInlineVolSec{}) + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(config io.Reader) (admission.Interface, error) { + return &csiInlineVolSec{ + Handler: admission.NewHandler(admission.Create), + }, nil + }) +} + +// csiInlineVolSec validates whether the namespace has permission to use a given +// CSI driver as an inline volume. 
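+// The decision compares the level declared on the CSIDriver via the
+// security.openshift.io/csi-ephemeral-volume-profile label (defaulting to
+// privileged when the driver or the label is missing) against the namespace's
+// pod security enforce, warn and audit levels, denying admission, emitting a
+// warning, or adding an audit annotation respectively.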
+type csiInlineVolSec struct { + *admission.Handler + //enabled bool + //inspectedFeatureGates bool + defaultPolicy podsecapi.Policy + nsLister corev1listers.NamespaceLister + nsListerSynced func() bool + csiDriverLister storagev1listers.CSIDriverLister + csiDriverListSynced func() bool + podSpecExtractor PodSpecExtractor +} + +// SetExternalKubeInformerFactory registers an informer +func (c *csiInlineVolSec) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + c.nsLister = kubeInformers.Core().V1().Namespaces().Lister() + c.nsListerSynced = kubeInformers.Core().V1().Namespaces().Informer().HasSynced + c.csiDriverLister = kubeInformers.Storage().V1().CSIDrivers().Lister() + c.csiDriverListSynced = kubeInformers.Storage().V1().CSIDrivers().Informer().HasSynced + c.podSpecExtractor = &OCPPodSpecExtractor{} + c.SetReadyFunc(func() bool { + return c.nsListerSynced() && c.csiDriverListSynced() + }) + + // set default pod security policy + c.defaultPolicy = podsecapi.Policy{ + Enforce: podsecapi.LevelVersion{ + Level: defaultPodSecEnforceProfile, + Version: podsecapi.GetAPIVersion(), + }, + Warn: podsecapi.LevelVersion{ + Level: defaultPodSecWarnProfile, + Version: podsecapi.GetAPIVersion(), + }, + Audit: podsecapi.LevelVersion{ + Level: defaultPodSecAuditProfile, + Version: podsecapi.GetAPIVersion(), + }, + } +} + +func (c *csiInlineVolSec) ValidateInitialization() error { + if c.nsLister == nil { + return fmt.Errorf("%s plugin needs a namespace lister", PluginName) + } + if c.nsListerSynced == nil { + return fmt.Errorf("%s plugin needs a namespace lister synced", PluginName) + } + if c.csiDriverLister == nil { + return fmt.Errorf("%s plugin needs a node lister", PluginName) + } + if c.csiDriverListSynced == nil { + return fmt.Errorf("%s plugin needs a node lister synced", PluginName) + } + if c.podSpecExtractor == nil { + return fmt.Errorf("%s plugin needs a pod spec extractor", PluginName) + } + return nil +} + +func (c *csiInlineVolSec) PolicyToEvaluate(labels map[string]string) (podsecapi.Policy, field.ErrorList) { + return podsecapi.PolicyToEvaluate(labels, c.defaultPolicy) +} + +func (c *csiInlineVolSec) Validate(ctx context.Context, attrs admission.Attributes, o admission.ObjectInterfaces) error { + // Only validate applicable resources + gr := attrs.GetResource().GroupResource() + if !podSpecResources[gr] { + return nil + } + // Do not validate subresources + if attrs.GetSubresource() != "" { + return nil + } + + // Get namespace + namespace, err := c.nsLister.Get(attrs.GetNamespace()) + if err != nil { + return admission.NewForbidden(attrs, fmt.Errorf("failed to get namespace: %v", err)) + } + // Require valid labels if they exist (the default policy is always valid) + nsPolicy, nsPolicyErrs := c.PolicyToEvaluate(namespace.Labels) + if len(nsPolicyErrs) > 0 { + return admission.NewForbidden(attrs, fmt.Errorf("invalid policy found on namespace %s: %v", namespace, nsPolicyErrs)) + } + // If the namespace policy is fully privileged, no need to evaluate further + // because it is allowed to use any inline volumes. 
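+ // (privileged is the least restrictive level, so no CSIDriver profile can be below it and none of the level comparisons below can trigger a denial, warning, or audit entry)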
+ if nsPolicy.FullyPrivileged() { + return nil + } + + // Extract the pod spec to evaluate + obj := attrs.GetObject() + _, podSpec, err := c.podSpecExtractor.ExtractPodSpec(obj) + if err != nil { + return admission.NewForbidden(attrs, fmt.Errorf("failed to extract pod spec: %v", err)) + } + // If an object with an optional pod spec does not contain a pod spec, skip validation + if podSpec == nil { + return nil + } + + klogV := klog.V(5) + if klogV.Enabled() { + klogV.InfoS("CSIInlineVolumeSecurity evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName()) + } + + // For each inline volume, find the CSIDriver and ensure the profile on the + // driver is allowed by the pod security profile on the namespace. + // If it is not: create errors, warnings, and audit as defined by policy. + for _, vol := range podSpec.Volumes { + // Only check for inline volumes + if vol.CSI == nil { + continue + } + + // Get the policy level for the CSIDriver + driverName := vol.CSI.Driver + driverLevel, err := c.getCSIDriverLevel(driverName) + if err != nil { + return admission.NewForbidden(attrs, err) + } + + // Compare CSIDriver level to the policy for the namespace + if podsecapi.CompareLevels(nsPolicy.Enforce.Level, driverLevel) > 0 { + // Not permitted, enforce error and deny admission + return admission.NewForbidden(attrs, fmt.Errorf(admissionResponseFormatStr, attrs.GetName(), driverName, attrs.GetNamespace(), "enforce", driverLevel)) + } + if podsecapi.CompareLevels(nsPolicy.Warn.Level, driverLevel) > 0 { + // Violates policy warn level, add warning + warning.AddWarning(ctx, "", fmt.Sprintf(admissionResponseFormatStr, attrs.GetName(), driverName, attrs.GetNamespace(), "warn", driverLevel)) + } + if podsecapi.CompareLevels(nsPolicy.Audit.Level, driverLevel) > 0 { + // Violates policy audit level, add audit annotation + auditMessageString := fmt.Sprintf(admissionResponseFormatStr, attrs.GetName(), driverName, attrs.GetNamespace(), "audit", driverLevel) + audit.AddAuditAnnotation(ctx, PluginName, auditMessageString) + } + } + + return nil +} + +// getCSIDriverLevel returns the effective policy level for the CSIDriver. +// If the driver is found and it has the label, use that policy. +// If the driver or the label is missing, default to the privileged policy. +func (c *csiInlineVolSec) getCSIDriverLevel(driverName string) (podsecapi.Level, error) { + driverLevel := defaultCSIInlineVolProfile + driver, err := c.csiDriverLister.Get(driverName) + if err != nil { + return driverLevel, nil + } + + csiDriverLabel, ok := driver.ObjectMeta.Labels[csiInlineVolProfileLabel] + if !ok { + return driverLevel, nil + } + + driverLevel, err = podsecapi.ParseLevel(csiDriverLabel) + if err != nil { + return driverLevel, fmt.Errorf("invalid label %s for CSIDriver %s: %v", csiInlineVolProfileLabel, driverName, err) + } + + return driverLevel, nil +} + +// PodSpecExtractor extracts a PodSpec from pod-controller resources that embed a PodSpec. +// This is the same as what is used in the pod-security-admission plugin (see +// staging/src/k8s.io/pod-security-admission/admission/admission.go) except here we +// are provided coreapi resources instead of corev1, which changes the interface. +type PodSpecExtractor interface { + // HasPodSpec returns true if the given resource type MAY contain an extractable PodSpec. 
+ HasPodSpec(schema.GroupResource) bool + // ExtractPodSpec returns a pod spec and metadata to evaluate from the object. + // An error returned here does not block admission of the pod-spec-containing object and is not returned to the user. + // If the object has no pod spec, return `nil, nil, nil`. + ExtractPodSpec(runtime.Object) (*metav1.ObjectMeta, *coreapi.PodSpec, error) +} + +type OCPPodSpecExtractor struct{} + +func (OCPPodSpecExtractor) HasPodSpec(gr schema.GroupResource) bool { + return podSpecResources[gr] +} + +func (OCPPodSpecExtractor) ExtractPodSpec(obj runtime.Object) (*metav1.ObjectMeta, *coreapi.PodSpec, error) { + switch o := obj.(type) { + case *coreapi.Pod: + return &o.ObjectMeta, &o.Spec, nil + case *coreapi.PodTemplate: + return extractPodSpecFromTemplate(&o.Template) + case *coreapi.ReplicationController: + return extractPodSpecFromTemplate(o.Spec.Template) + case *appsapi.ReplicaSet: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *appsapi.Deployment: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *appsapi.DaemonSet: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *appsapi.StatefulSet: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *batchapi.Job: + return extractPodSpecFromTemplate(&o.Spec.Template) + case *batchapi.CronJob: + return extractPodSpecFromTemplate(&o.Spec.JobTemplate.Spec.Template) + default: + return nil, nil, fmt.Errorf("unexpected object type: %s", obj.GetObjectKind().GroupVersionKind().String()) + } +} + +func extractPodSpecFromTemplate(template *coreapi.PodTemplateSpec) (*metav1.ObjectMeta, *coreapi.PodSpec, error) { + if template == nil { + return nil, nil, nil + } + return &template.ObjectMeta, &template.Spec, nil +} diff --git a/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission_test.go b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission_test.go new file mode 100644 index 0000000000000..d69a03256a5b1 --- /dev/null +++ b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/admission_test.go @@ -0,0 +1,508 @@ +package csiinlinevolumesecurity + +import ( + "context" + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + corev1listers "k8s.io/client-go/listers/core/v1" + storagev1listers "k8s.io/client-go/listers/storage/v1" + "k8s.io/client-go/tools/cache" + appsapi "k8s.io/kubernetes/pkg/apis/apps" + batchapi "k8s.io/kubernetes/pkg/apis/batch" + coreapi "k8s.io/kubernetes/pkg/apis/core" + podsecapi "k8s.io/pod-security-admission/api" +) + +const ( + defaultNamespaceName = "test-namespace" + defaultCSIDriverName = "test-driver" + + // expected error string when privileged namespace is required + privNamespaceRequiredError = "has a pod security enforce level that is lower than privileged" +) + +func getMockCSIInlineVolSec(namespace *corev1.Namespace, driver *storagev1.CSIDriver) (*csiInlineVolSec, error) { + c := &csiInlineVolSec{ + Handler: admission.NewHandler(admission.Create), + defaultPolicy: podsecapi.Policy{ + Enforce: podsecapi.LevelVersion{ + Level: defaultPodSecEnforceProfile, + Version: podsecapi.GetAPIVersion(), + }, + Warn: podsecapi.LevelVersion{ + Level: defaultPodSecWarnProfile, + Version: podsecapi.GetAPIVersion(), + }, + Audit: podsecapi.LevelVersion{ + Level: 
defaultPodSecAuditProfile, + Version: podsecapi.GetAPIVersion(), + }, + }, + nsLister: fakeNamespaceLister(namespace), + nsListerSynced: func() bool { return true }, + csiDriverLister: fakeCSIDriverLister(driver), + csiDriverListSynced: func() bool { return true }, + podSpecExtractor: &OCPPodSpecExtractor{}, + } + if err := c.ValidateInitialization(); err != nil { + return nil, err + } + + return c, nil +} + +func fakeNamespaceLister(ns *corev1.Namespace) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + _ = indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func fakeCSIDriverLister(driver *storagev1.CSIDriver) storagev1listers.CSIDriverLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + _ = indexer.Add(driver) + return storagev1listers.NewCSIDriverLister(indexer) +} + +func TestValidate(t *testing.T) { + type TestStruct struct { + name string + obj runtime.Object + namespace *corev1.Namespace + driver *storagev1.CSIDriver + expectedError error + } + + tests := []TestStruct{ + { + name: "should allow pods with no volumes", + obj: testPod(), + namespace: testNamespaceNoLabels(), + driver: testCSIDriverNoLabels(), + }, + { + name: "should allow pods with inline volumes in a baseline namespace when the driver uses the baseline label", + obj: testPodWithInlineVol(), + namespace: testNamespaceBaseline(), + driver: testCSIDriverBaseline(), + }, + { + name: "should allow pods with inline volumes in a baseline namespace when the driver uses the restricted label", + obj: testPodWithInlineVol(), + namespace: testNamespaceBaseline(), + driver: testCSIDriverRestricted(), + }, + { + name: "should deny pod admission with inline volumes if the CSI driver is not found and namespace is restricted", + obj: testPodWithInvalidDriverName(), + namespace: testNamespaceRestricted(), + driver: testCSIDriverRestricted(), + expectedError: fmt.Errorf(privNamespaceRequiredError), + }, + { + name: "should allow pod admission with inline volumes if the CSI driver is not found and namespace is privileged", + obj: testPodWithInvalidDriverName(), + namespace: testNamespacePrivileged(), + driver: testCSIDriverRestricted(), + }, + { + name: "should deny pod admission if the CSI driver has an invalid profile label", + obj: testPodWithInlineVol(), + namespace: testNamespaceBaseline(), + driver: testCSIDriverInvalid(), + expectedError: fmt.Errorf("invalid label security.openshift.io/csi-ephemeral-volume-profile for CSIDriver test-driver: must be one of privileged, baseline, restricted"), + }, + { + name: "should deny pod admission if the namespace has an invalid profile label", + obj: testPodWithInlineVol(), + namespace: testNamespaceInvalid(), + driver: testCSIDriverRestricted(), + expectedError: fmt.Errorf("Invalid value: \"invalid-value\": must be one of privileged, baseline, restricted"), + }, + { + name: "should ignore types that do not have a pod spec", + obj: &coreapi.Service{}, + namespace: testNamespaceNoLabels(), + driver: testCSIDriverNoLabels(), + }, + } + + podSpecableObjects := []struct { + name string + obj runtime.Object + }{ + {"Pod", &coreapi.Pod{}}, + {"PodTemplate", &coreapi.PodTemplate{}}, + {"ReplicationController", &coreapi.ReplicationController{}}, + {"ReplicaSet", &appsapi.ReplicaSet{}}, + {"Deployment", &appsapi.Deployment{}}, + {"DaemonSet", &appsapi.DaemonSet{}}, + {"StatefulSet", &appsapi.StatefulSet{}}, + {"Job", &batchapi.Job{}}, + {"CronJob", &batchapi.CronJob{}}, + } + + // Add a 
standard subset of the tests for each supported object type + for _, pso := range podSpecableObjects { + objTests := []TestStruct{ + { + name: fmt.Sprintf("should deny %s admission by default when it has an inline volume and no policy is defined", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithInlineVol()), + namespace: testNamespaceNoLabels(), + driver: testCSIDriverNoLabels(), + expectedError: fmt.Errorf(privNamespaceRequiredError), + }, + { + name: fmt.Sprintf("should deny %s admission with inline volumes in a baseline namespace when the driver uses the privileged label", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithInlineVol()), + namespace: testNamespaceBaseline(), + driver: testCSIDriverPrivileged(), + expectedError: fmt.Errorf(privNamespaceRequiredError), + }, + { + name: fmt.Sprintf("should allow %s with only persistent volume claims", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithPVC()), + namespace: testNamespaceNoLabels(), + driver: testCSIDriverNoLabels(), + }, + { + name: fmt.Sprintf("should allow %s with inline volumes when running in a privileged namespace", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithInlineVol()), + namespace: testNamespacePrivileged(), + driver: testCSIDriverNoLabels(), + }, + { + name: fmt.Sprintf("should allow %s with inline volumes in a restricted namespace when the driver uses the restricted label", pso.name), + obj: createPodControllerObject(pso.obj, testPodWithInlineVol()), + namespace: testNamespaceRestricted(), + driver: testCSIDriverRestricted(), + }, + } + + tests = append(tests, objTests...) + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c, err := getMockCSIInlineVolSec(test.namespace, test.driver) + if err != nil { + t.Fatalf("%s: failed getMockCSIInlineVolSec: %v", test.name, err) + } + + ns := test.namespace.Name + name := test.obj.(metav1.Object).GetName() + gvr := getObjectGroupVersionResource(test.obj) + attrs := admission.NewAttributesRecord(test.obj, nil, schema.GroupVersionKind{}, ns, name, gvr, "", admission.Create, nil, false, fakeUser()) + + err = c.Validate(context.TODO(), attrs, nil) + if err != nil { + if test.expectedError == nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !strings.Contains(err.Error(), test.expectedError.Error()) { + t.Fatalf("%s: the expected error %v, got %v", test.name, test.expectedError, err) + } + } + + if err == nil && test.expectedError != nil { + t.Fatalf("%s: the expected error %v, got nil", test.name, test.expectedError) + } + }) + } +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +func testNamespaceNoLabels() *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultNamespaceName, + }, + } +} + +func testNamespaceRestricted() *corev1.Namespace { + ns := testNamespaceNoLabels() + ns.Labels = map[string]string{ + "pod-security.kubernetes.io/audit": "restricted", + "pod-security.kubernetes.io/enforce": "restricted", + "pod-security.kubernetes.io/warn": "restricted", + } + return ns +} + +func testNamespaceBaseline() *corev1.Namespace { + ns := testNamespaceNoLabels() + ns.Labels = map[string]string{ + "pod-security.kubernetes.io/audit": "baseline", + "pod-security.kubernetes.io/enforce": "baseline", + "pod-security.kubernetes.io/warn": "baseline", + } + return ns +} + +func testNamespacePrivileged() *corev1.Namespace { + ns := testNamespaceNoLabels() + ns.Labels = 
map[string]string{ + "pod-security.kubernetes.io/audit": "privileged", + "pod-security.kubernetes.io/enforce": "privileged", + "pod-security.kubernetes.io/warn": "privileged", + } + return ns +} + +func testNamespaceInvalid() *corev1.Namespace { + ns := testNamespaceNoLabels() + ns.Labels = map[string]string{ + "pod-security.kubernetes.io/audit": "invalid-value", + "pod-security.kubernetes.io/enforce": "invalid-value", + "pod-security.kubernetes.io/warn": "invalid-value", + } + return ns +} + +func testCSIDriverNoLabels() *storagev1.CSIDriver { + return &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultCSIDriverName, + }, + Spec: storagev1.CSIDriverSpec{ + VolumeLifecycleModes: []storagev1.VolumeLifecycleMode{ + storagev1.VolumeLifecycleEphemeral, + }, + }, + } +} + +func testCSIDriverRestricted() *storagev1.CSIDriver { + driver := testCSIDriverNoLabels() + driver.Labels = map[string]string{ + csiInlineVolProfileLabel: "restricted", + } + return driver +} + +func testCSIDriverBaseline() *storagev1.CSIDriver { + driver := testCSIDriverNoLabels() + driver.Labels = map[string]string{ + csiInlineVolProfileLabel: "baseline", + } + return driver +} + +func testCSIDriverPrivileged() *storagev1.CSIDriver { + driver := testCSIDriverNoLabels() + driver.Labels = map[string]string{ + csiInlineVolProfileLabel: "privileged", + } + return driver +} + +func testCSIDriverInvalid() *storagev1.CSIDriver { + driver := testCSIDriverNoLabels() + driver.Labels = map[string]string{ + csiInlineVolProfileLabel: "invalid-value", + } + return driver +} + +func testPod() *coreapi.Pod { + pod := &coreapi.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: defaultNamespaceName, + }, + Spec: coreapi.PodSpec{ + InitContainers: []coreapi.Container{ + { + Name: "initTest", + }, + }, + Containers: []coreapi.Container{ + { + Name: "test", + }, + }, + }, + } + + return pod +} + +func testPodWithInlineVol() *coreapi.Pod { + pod := testPod() + pod.Spec.Volumes = []coreapi.Volume{ + { + Name: "test-vol", + VolumeSource: coreapi.VolumeSource{ + CSI: &coreapi.CSIVolumeSource{ + Driver: defaultCSIDriverName, + }, + }, + }, + } + return pod +} + +func testPodWithPVC() *coreapi.Pod { + pod := testPod() + pod.Spec.Volumes = []coreapi.Volume{ + { + Name: "test-vol", + VolumeSource: coreapi.VolumeSource{ + PersistentVolumeClaim: &coreapi.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc", + }, + }, + }, + } + return pod +} + +func testPodWithInvalidDriverName() *coreapi.Pod { + pod := testPod() + pod.Spec.Volumes = []coreapi.Volume{ + { + Name: "test-vol", + VolumeSource: coreapi.VolumeSource{ + CSI: &coreapi.CSIVolumeSource{ + Driver: "invalid-csi-driver", + }, + }, + }, + } + return pod +} + +// Creates a pod controller object, given an object type and a pod for the template +func createPodControllerObject(obj runtime.Object, pod *coreapi.Pod) runtime.Object { + switch obj.(type) { + case *coreapi.Pod: + return pod + case *coreapi.PodTemplate: + return &coreapi.PodTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod-template"}, + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + } + case *coreapi.ReplicationController: + return &coreapi.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{Name: "test-repl-controller"}, + Spec: coreapi.ReplicationControllerSpec{ + Template: &coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *appsapi.ReplicaSet: + return &appsapi.ReplicaSet{ + ObjectMeta: 
metav1.ObjectMeta{Name: "test-replicaset"}, + Spec: appsapi.ReplicaSetSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *appsapi.Deployment: + return &appsapi.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment"}, + Spec: appsapi.DeploymentSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *appsapi.DaemonSet: + return &appsapi.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test-daemonset"}, + Spec: appsapi.DaemonSetSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *appsapi.StatefulSet: + return &appsapi.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test-statefulset"}, + Spec: appsapi.StatefulSetSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *batchapi.Job: + return &batchapi.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "test-job"}, + Spec: batchapi.JobSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + } + case *batchapi.CronJob: + return &batchapi.CronJob{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cronjob"}, + Spec: batchapi.CronJobSpec{ + JobTemplate: batchapi.JobTemplateSpec{ + Spec: batchapi.JobSpec{ + Template: coreapi.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + }, + }, + }, + }, + } + default: + // If we can't add a pod template, just return the provided object. + return obj + } +} + +func getObjectGroupVersionResource(obj runtime.Object) schema.GroupVersionResource { + ver := "version" + switch obj.(type) { + case *coreapi.Pod: + return coreapi.Resource("pods").WithVersion(ver) + case *coreapi.PodTemplate: + return coreapi.Resource("podtemplates").WithVersion(ver) + case *coreapi.ReplicationController: + return coreapi.Resource("replicationcontrollers").WithVersion(ver) + case *appsapi.ReplicaSet: + return appsapi.Resource("replicasets").WithVersion(ver) + case *appsapi.Deployment: + return appsapi.Resource("deployments").WithVersion(ver) + case *appsapi.DaemonSet: + return appsapi.Resource("daemonsets").WithVersion(ver) + case *appsapi.StatefulSet: + return appsapi.Resource("statefulsets").WithVersion(ver) + case *batchapi.Job: + return batchapi.Resource("jobs").WithVersion(ver) + case *batchapi.CronJob: + return batchapi.Resource("cronjobs").WithVersion(ver) + default: + // If it's not a recognized object, return something invalid. + return coreapi.Resource("invalidresource").WithVersion("invalidversion") + } +} diff --git a/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/doc.go b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/doc.go new file mode 100644 index 0000000000000..ad819a79135a8 --- /dev/null +++ b/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity/doc.go @@ -0,0 +1,7 @@ +package csiinlinevolumesecurity + +// The CSIInlineVolumeSecurity admission plugin inspects inline volumes +// on pod creation and compares the security.openshift.io/csi-ephemeral-volume-profile +// label on the associated CSIDriver object to the pod security profile on the namespace. +// Admission is only allowed if the namespace enforces a profile of equal or greater +// permission compared to the profile label for the CSIDriver. 
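A minimal, hypothetical sketch (not part of this change) of the level comparison described above, using the same k8s.io/pod-security-admission/api helpers that the Validate method calls; the two level strings are assumed example values standing in for a namespace enforce label and a CSIDriver profile label:

package main

import (
	"fmt"

	podsecapi "k8s.io/pod-security-admission/api"
)

func main() {
	// Assumed example inputs: the namespace's pod-security.kubernetes.io/enforce label value
	// and the CSIDriver's security.openshift.io/csi-ephemeral-volume-profile label value.
	nsEnforce, _ := podsecapi.ParseLevel("restricted")
	driverLevel, _ := podsecapi.ParseLevel("privileged")

	// A positive result means the namespace is more restrictive than the driver's declared
	// profile, which is the condition under which the plugin denies (enforce), warns, or audits.
	if podsecapi.CompareLevels(nsEnforce, driverLevel) > 0 {
		fmt.Println("deny: namespace enforce level is lower than the driver's profile")
	} else {
		fmt.Println("allow")
	}
}

With the plugin's defaults (an unlabeled namespace treated as restricted, an unlabeled CSIDriver treated as privileged), this comparison denies, which is why the tests above expect an unlabeled driver's inline volumes to be admitted only in a privileged namespace.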
diff --git a/openshift-kube-apiserver/authorization/browsersafe/authorizer.go b/openshift-kube-apiserver/authorization/browsersafe/authorizer.go new file mode 100644 index 0000000000000..2b39b309f69b8 --- /dev/null +++ b/openshift-kube-apiserver/authorization/browsersafe/authorizer.go @@ -0,0 +1,107 @@ +package browsersafe + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +const ( + proxyAction = "proxy" + unsafeProxy = "unsafeproxy" +) + +type browserSafeAuthorizer struct { + delegate authorizer.Authorizer + + // list of groups, any of which indicate the request is authenticated + authenticatedGroups sets.String +} + +func NewBrowserSafeAuthorizer(delegate authorizer.Authorizer, authenticatedGroups ...string) authorizer.Authorizer { + return &browserSafeAuthorizer{ + delegate: delegate, + authenticatedGroups: sets.NewString(authenticatedGroups...), + } +} + +func (a *browserSafeAuthorizer) Authorize(ctx context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + attrs := a.getBrowserSafeAttributes(attributes) + decision, reason, err := a.delegate.Authorize(ctx, attrs) + safeAttributes, changed := attrs.(*browserSafeAttributes) + + // check if the request was not allowed and we changed the attributes + if decision == authorizer.DecisionAllow || !changed { + return decision, reason, err + } + + // if so, use this information to update the reason + return decision, safeAttributes.reason(reason), err +} + +func (a *browserSafeAuthorizer) getBrowserSafeAttributes(attributes authorizer.Attributes) authorizer.Attributes { + if !attributes.IsResourceRequest() { + return attributes + } + + isProxyVerb := attributes.GetVerb() == proxyAction + isProxySubresource := attributes.GetSubresource() == proxyAction + + if !isProxyVerb && !isProxySubresource { + // Requests to non-proxy resources don't expose HTML or HTTP-handling user content to browsers + return attributes + } + + if user := attributes.GetUser(); user != nil { + if a.authenticatedGroups.HasAny(user.GetGroups()...) { + // An authenticated request indicates this isn't a browser page load. + // Browsers cannot make direct authenticated requests. + // This depends on the API not enabling basic or cookie-based auth. 
+ return attributes + } + } + + return &browserSafeAttributes{ + Attributes: attributes, + isProxyVerb: isProxyVerb, + isProxySubresource: isProxySubresource, + } +} + +type browserSafeAttributes struct { + authorizer.Attributes + + isProxyVerb, isProxySubresource bool +} + +func (b *browserSafeAttributes) GetVerb() string { + if b.isProxyVerb { + return unsafeProxy + } + return b.Attributes.GetVerb() +} + +func (b *browserSafeAttributes) GetSubresource() string { + if b.isProxySubresource { + return unsafeProxy + } + return b.Attributes.GetSubresource() +} + +func (b *browserSafeAttributes) reason(reason string) string { + if b.isProxyVerb { + if len(reason) != 0 { + reason += ", " + } + reason += fmt.Sprintf("%s verb changed to %s", proxyAction, unsafeProxy) + } + if b.isProxySubresource { + if len(reason) != 0 { + reason += ", " + } + reason += fmt.Sprintf("%s subresource changed to %s", proxyAction, unsafeProxy) + } + return reason +} diff --git a/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go b/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go new file mode 100644 index 0000000000000..1d14a86daddeb --- /dev/null +++ b/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go @@ -0,0 +1,80 @@ +package browsersafe + +import ( + "context" + "testing" + + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +func TestBrowserSafeAuthorizer(t *testing.T) { + for name, tc := range map[string]struct { + attributes authorizer.Attributes + + expectedVerb string + expectedSubresource string + expectedReason string + }{ + "non-resource": { + attributes: authorizer.AttributesRecord{ResourceRequest: false, Verb: "GET"}, + expectedVerb: "GET", + }, + + "non-proxy": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "logs"}, + expectedVerb: "get", + expectedSubresource: "logs", + }, + + "unsafe proxy subresource": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "proxy"}, + expectedVerb: "get", + expectedSubresource: "unsafeproxy", + expectedReason: "proxy subresource changed to unsafeproxy", + }, + "unsafe proxy verb": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "proxy", Resource: "nodes"}, + expectedVerb: "unsafeproxy", + expectedReason: "proxy verb changed to unsafeproxy", + }, + "unsafe proxy verb anonymous": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "proxy", Resource: "nodes", + User: &user.DefaultInfo{Name: "system:anonymous", Groups: []string{"system:unauthenticated"}}}, + expectedVerb: "unsafeproxy", + expectedReason: "proxy verb changed to unsafeproxy", + }, + + "proxy subresource authenticated": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "proxy", + User: &user.DefaultInfo{Name: "bob", Groups: []string{"system:authenticated"}}}, + expectedVerb: "get", + expectedSubresource: "proxy", + }, + } { + delegateAuthorizer := &recordingAuthorizer{} + safeAuthorizer := NewBrowserSafeAuthorizer(delegateAuthorizer, "system:authenticated") + + authorized, reason, err := safeAuthorizer.Authorize(context.TODO(), tc.attributes) + if authorized == authorizer.DecisionAllow || reason != tc.expectedReason || err != nil { + t.Errorf("%s: unexpected output: %v %s %v", name, authorized, reason, err) + continue + } + + if delegateAuthorizer.attributes.GetVerb() != 
tc.expectedVerb { + t.Errorf("%s: expected verb %s, got %s", name, tc.expectedVerb, delegateAuthorizer.attributes.GetVerb()) + } + if delegateAuthorizer.attributes.GetSubresource() != tc.expectedSubresource { + t.Errorf("%s: expected verb %s, got %s", name, tc.expectedSubresource, delegateAuthorizer.attributes.GetSubresource()) + } + } +} + +type recordingAuthorizer struct { + attributes authorizer.Attributes +} + +func (t *recordingAuthorizer) Authorize(_ context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { + t.attributes = a + return authorizer.DecisionNoOpinion, "", nil +} diff --git a/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go new file mode 100644 index 0000000000000..989f70609528d --- /dev/null +++ b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go @@ -0,0 +1,49 @@ +package scopeauthorizer + +import ( + "context" + "fmt" + + "k8s.io/apiserver/pkg/authorization/authorizer" + rbaclisters "k8s.io/client-go/listers/rbac/v1" + authorizerrbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "github.com/openshift/apiserver-library-go/pkg/authorization/scope" +) + +type scopeAuthorizer struct { + clusterRoleGetter rbaclisters.ClusterRoleLister +} + +func NewAuthorizer(clusterRoleGetter rbaclisters.ClusterRoleLister) authorizer.Authorizer { + return &scopeAuthorizer{clusterRoleGetter: clusterRoleGetter} +} + +func (a *scopeAuthorizer) Authorize(ctx context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + user := attributes.GetUser() + if user == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("user missing from context") + } + + scopes := user.GetExtra()[authorizationv1.ScopesKey] + if len(scopes) == 0 { + return authorizer.DecisionNoOpinion, "", nil + } + + nonFatalErrors := "" + + // scopeResolutionErrors aren't fatal. If any of the scopes we find allow this, then the overall scope limits allow it + rules, err := scope.ScopesToRules(scopes, attributes.GetNamespace(), a.clusterRoleGetter) + if err != nil { + nonFatalErrors = fmt.Sprintf(", additionally the following non-fatal errors were reported: %v", err) + } + + // check rules against attributes + if authorizerrbac.RulesAllow(attributes, rules...) { + return authorizer.DecisionNoOpinion, "", nil + } + + // the scope prevent this. 
We need to authoritatively deny + return authorizer.DecisionDeny, fmt.Sprintf("scopes %v prevent this action%s", scopes, nonFatalErrors), nil +} diff --git a/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go new file mode 100644 index 0000000000000..9b73e6c2e23ac --- /dev/null +++ b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go @@ -0,0 +1,150 @@ +package scopeauthorizer + +import ( + "context" + "strings" + "testing" + + "k8s.io/apiserver/pkg/authentication/user" + kauthorizer "k8s.io/apiserver/pkg/authorization/authorizer" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +func TestAuthorize(t *testing.T) { + testCases := []struct { + name string + attributes kauthorizer.AttributesRecord + expectedAllowed kauthorizer.Decision + expectedErr string + expectedMsg string + }{ + { + name: "no user", + attributes: kauthorizer.AttributesRecord{ + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + expectedErr: `user missing from context`, + }, + { + name: "no extra", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "empty extra", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "empty scopes", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "bad scope", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"does-not-exist"}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [does-not-exist] prevent this action, additionally the following non-fatal errors were reported: no scope evaluator found for "does-not-exist"`, + }, + { + name: "bad scope 2", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"role:dne"}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [role:dne] prevent this action, additionally the following non-fatal errors were reported: bad format for scope role:dne`, + }, + { + name: "scope doesn't cover", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:info"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "get", Resource: "users", Name: "harold"}, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [user:info] prevent this action`, + }, + { + name: "scope covers", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:info"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "get", Resource: "users", Name: "~"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "scope covers for discovery", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: 
{"user:info"}}}, + ResourceRequest: false, + Namespace: "ns", + Verb: "get", Path: "/api"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "user:full covers any resource", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:full"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "update", Resource: "users", Name: "harold"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "user:full covers any non-resource", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:full"}}}, + ResourceRequest: false, + Namespace: "ns", + Verb: "post", Path: "/foo/bar/baz"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + authorizer := NewAuthorizer(nil) + + actualAllowed, actualMsg, actualErr := authorizer.Authorize(context.TODO(), tc.attributes) + switch { + case len(tc.expectedErr) == 0 && actualErr == nil: + case len(tc.expectedErr) == 0 && actualErr != nil: + t.Errorf("%s: unexpected error: %v", tc.name, actualErr) + case len(tc.expectedErr) != 0 && actualErr == nil: + t.Errorf("%s: missing error: %v", tc.name, tc.expectedErr) + case len(tc.expectedErr) != 0 && actualErr != nil: + if !strings.Contains(actualErr.Error(), tc.expectedErr) { + t.Errorf("expected %v, got %v", tc.expectedErr, actualErr) + } + } + if tc.expectedMsg != actualMsg { + t.Errorf("expected %v, got %v", tc.expectedMsg, actualMsg) + } + if tc.expectedAllowed != actualAllowed { + t.Errorf("expected %v, got %v", tc.expectedAllowed, actualAllowed) + } + }) + } +} diff --git a/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go b/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go new file mode 100644 index 0000000000000..7e48ecea2ec9a --- /dev/null +++ b/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go @@ -0,0 +1,115 @@ +package configdefault + +import ( + "io/ioutil" + "os" + "path/filepath" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + "github.com/openshift/library-go/pkg/config/configdefaults" + "k8s.io/klog/v2" +) + +// ResolveDirectoriesForSATokenVerification takes our config (which allows directories) and navigates one level of +// those directories for files. This makes it easy to build a single configmap that contains lots of aggregated files. +// if we fail to open the file for inspection, the resolving code in kube-apiserver may have drifted from us +// we include the raw file and let the kube-apiserver succeed or fail. 
+func ResolveDirectoriesForSATokenVerification(config *kubecontrolplanev1.KubeAPIServerConfig) { + // kube doesn't honor directories, but we want to allow them in our sa token validators + resolvedSATokenValidationCerts := []string{} + for _, filename := range config.ServiceAccountPublicKeyFiles { + file, err := os.Open(filename) + if err != nil { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + klog.Warningf(err.Error()) + continue + } + fileInfo, err := file.Stat() + if err != nil { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + klog.Warningf(err.Error()) + continue + } + if !fileInfo.IsDir() { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + continue + } + + contents, err := ioutil.ReadDir(filename) + switch { + case os.IsNotExist(err) || os.IsPermission(err): + klog.Warningf(err.Error()) + case err != nil: + panic(err) // some weird, unexpected error + default: + for _, content := range contents { + if !content.Mode().IsRegular() { + continue + } + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filepath.Join(filename, content.Name())) + } + } + } + + config.ServiceAccountPublicKeyFiles = resolvedSATokenValidationCerts +} + +func SetRecommendedKubeAPIServerConfigDefaults(config *kubecontrolplanev1.KubeAPIServerConfig) { + configdefaults.DefaultString(&config.GenericAPIServerConfig.StorageConfig.StoragePrefix, "kubernetes.io") + configdefaults.DefaultString(&config.GenericAPIServerConfig.ServingInfo.BindAddress, "0.0.0.0:6443") + + configdefaults.SetRecommendedGenericAPIServerConfigDefaults(&config.GenericAPIServerConfig) + SetRecommendedMasterAuthConfigDefaults(&config.AuthConfig) + SetRecommendedAggregatorConfigDefaults(&config.AggregatorConfig) + SetRecommendedKubeletConnectionInfoDefaults(&config.KubeletClientInfo) + + configdefaults.DefaultString(&config.ServicesSubnet, "10.0.0.0/24") + configdefaults.DefaultString(&config.ServicesNodePortRange, "30000-32767") + + if len(config.ServiceAccountPublicKeyFiles) == 0 { + config.ServiceAccountPublicKeyFiles = append([]string{}, "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs") + } + + // after the aggregator defaults are set, we can default the auth config values + // TODO this indicates that we're set two different things to the same value + if config.AuthConfig.RequestHeader == nil { + config.AuthConfig.RequestHeader = &kubecontrolplanev1.RequestHeaderAuthenticationOptions{} + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.ClientCommonNames, []string{"system:openshift-aggregator"}) + configdefaults.DefaultString(&config.AuthConfig.RequestHeader.ClientCA, "/var/run/configmaps/aggregator-client-ca/ca-bundle.crt") + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.UsernameHeaders, []string{"X-Remote-User"}) + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.GroupHeaders, []string{"X-Remote-Group"}) + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.ExtraHeaderPrefixes, []string{"X-Remote-Extra-"}) + } + + // Set defaults Cache TTLs for external Webhook Token Reviewers + for i := range config.AuthConfig.WebhookTokenAuthenticators { + if len(config.AuthConfig.WebhookTokenAuthenticators[i].CacheTTL) == 0 { + config.AuthConfig.WebhookTokenAuthenticators[i].CacheTTL = "2m" + } + } + + if config.OAuthConfig != nil { + for i := range config.OAuthConfig.IdentityProviders { + // By default, only let one identity 
provider authenticate a particular user + // If multiple identity providers collide, the second one in will fail to auth + // The admin can set this to "add" if they want to allow new identities to join existing users + configdefaults.DefaultString(&config.OAuthConfig.IdentityProviders[i].MappingMethod, "claim") + } + } +} + +func SetRecommendedMasterAuthConfigDefaults(config *kubecontrolplanev1.MasterAuthConfig) { +} + +func SetRecommendedAggregatorConfigDefaults(config *kubecontrolplanev1.AggregatorConfig) { + configdefaults.DefaultString(&config.ProxyClientInfo.KeyFile, "/var/run/secrets/aggregator-client/tls.key") + configdefaults.DefaultString(&config.ProxyClientInfo.CertFile, "/var/run/secrets/aggregator-client/tls.crt") +} + +func SetRecommendedKubeletConnectionInfoDefaults(config *kubecontrolplanev1.KubeletConnectionInfo) { + if config.Port == 0 { + config.Port = 10250 + } + configdefaults.DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/kubelet-client/tls.key") + configdefaults.DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/kubelet-client/tls.crt") +} diff --git a/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go b/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go new file mode 100644 index 0000000000000..449952e5650d1 --- /dev/null +++ b/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go @@ -0,0 +1,122 @@ +package configdefault + +import ( + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + osinv1 "github.com/openshift/api/osin/v1" + "github.com/openshift/library-go/pkg/config/helpers" +) + +func GetKubeAPIServerConfigFileReferences(config *kubecontrolplanev1.KubeAPIServerConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + + refs = append(refs, helpers.GetGenericAPIServerConfigFileReferences(&config.GenericAPIServerConfig)...) + refs = append(refs, GetKubeletConnectionInfoFileReferences(&config.KubeletClientInfo)...) + + if config.OAuthConfig != nil { + refs = append(refs, GetOAuthConfigFileReferences(config.OAuthConfig)...) + } + + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.CertFile) + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.KeyFile) + + if config.AuthConfig.RequestHeader != nil { + refs = append(refs, &config.AuthConfig.RequestHeader.ClientCA) + } + for k := range config.AuthConfig.WebhookTokenAuthenticators { + refs = append(refs, &config.AuthConfig.WebhookTokenAuthenticators[k].ConfigFile) + } + if len(config.AuthConfig.OAuthMetadataFile) > 0 { + refs = append(refs, &config.AuthConfig.OAuthMetadataFile) + } + + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.CertFile) + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.KeyFile) + + for i := range config.ServiceAccountPublicKeyFiles { + refs = append(refs, &config.ServiceAccountPublicKeyFiles[i]) + } + + return refs +} + +func GetKubeletConnectionInfoFileReferences(config *kubecontrolplanev1.KubeletConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, helpers.GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.CA) + return refs +} + +func GetOAuthConfigFileReferences(config *osinv1.OAuthConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + + if config.MasterCA != nil { + refs = append(refs, config.MasterCA) + } + + refs = append(refs, GetSessionConfigFileReferences(config.SessionConfig)...) 
+ for _, identityProvider := range config.IdentityProviders { + switch provider := identityProvider.Provider.Object.(type) { + case (*osinv1.RequestHeaderIdentityProvider): + refs = append(refs, &provider.ClientCA) + + case (*osinv1.HTPasswdPasswordIdentityProvider): + refs = append(refs, &provider.File) + + case (*osinv1.LDAPPasswordIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.BindPassword)...) + + case (*osinv1.BasicAuthPasswordIdentityProvider): + refs = append(refs, helpers.GetRemoteConnectionInfoFileReferences(&provider.RemoteConnectionInfo)...) + + case (*osinv1.KeystonePasswordIdentityProvider): + refs = append(refs, helpers.GetRemoteConnectionInfoFileReferences(&provider.RemoteConnectionInfo)...) + + case (*osinv1.GitLabIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.OpenIDIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.GoogleIdentityProvider): + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.GitHubIdentityProvider): + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + refs = append(refs, &provider.CA) + + } + } + + if config.Templates != nil { + refs = append(refs, &config.Templates.Login) + refs = append(refs, &config.Templates.ProviderSelection) + refs = append(refs, &config.Templates.Error) + } + + return refs +} + +func GetSessionConfigFileReferences(config *osinv1.SessionConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.SessionSecretsFile) + return refs +} diff --git a/openshift-kube-apiserver/enablement/enablement.go b/openshift-kube-apiserver/enablement/enablement.go new file mode 100644 index 0000000000000..d955f66825181 --- /dev/null +++ b/openshift-kube-apiserver/enablement/enablement.go @@ -0,0 +1,71 @@ +package enablement + +import ( + "fmt" + "runtime/debug" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/rest" +) + +func ForceOpenShift(newOpenshiftConfig *kubecontrolplanev1.KubeAPIServerConfig) { + isOpenShift = true + openshiftConfig = newOpenshiftConfig +} + +func SetLoopbackClientConfig(kubeClientConfig *rest.Config) { + loopbackClientConfig = rest.CopyConfig(kubeClientConfig) +} + +var ( + isOpenShift = false + openshiftConfig *kubecontrolplanev1.KubeAPIServerConfig + postStartHooks = map[string]PostStartHookConfigEntry{} + appendPostStartHooksCalled = false + loopbackClientConfig *rest.Config +) + +type PostStartHookConfigEntry struct { + Hook genericapiserver.PostStartHookFunc + // originatingStack holds the stack that registered postStartHooks. This allows us to show a more helpful message + // for duplicate registration. 
+ OriginatingStack string +} + +func IsOpenShift() bool { + return isOpenShift +} + +func OpenshiftConfig() *kubecontrolplanev1.KubeAPIServerConfig { + return openshiftConfig +} + +func LoopbackClientConfig() *rest.Config { + return loopbackClientConfig +} + +func AddPostStartHookOrDie(name string, hook genericapiserver.PostStartHookFunc) { + if appendPostStartHooksCalled { + panic(fmt.Errorf("already appended post start hooks")) + } + if len(name) == 0 { + panic(fmt.Errorf("missing name")) + } + if hook == nil { + panic(fmt.Errorf("hook func may not be nil: %q", name)) + } + + if postStartHook, exists := postStartHooks[name]; exists { + // this is programmer error, but it can be hard to debug + panic(fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.OriginatingStack)) + } + postStartHooks[name] = PostStartHookConfigEntry{Hook: hook, OriginatingStack: string(debug.Stack())} +} + +func AppendPostStartHooksOrDie(config *genericapiserver.Config) { + appendPostStartHooksCalled = true + for name, curr := range postStartHooks { + config.AddPostStartHookOrDie(name, curr.Hook) + } +} diff --git a/openshift-kube-apiserver/enablement/intialization.go b/openshift-kube-apiserver/enablement/intialization.go new file mode 100644 index 0000000000000..a2421c627d607 --- /dev/null +++ b/openshift-kube-apiserver/enablement/intialization.go @@ -0,0 +1,85 @@ +package enablement + +import ( + "io/ioutil" + "path" + + configv1 "github.com/openshift/api/config/v1" + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + osinv1 "github.com/openshift/api/osin/v1" + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/clientcmd/api" + aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" + "k8s.io/kubernetes/openshift-kube-apiserver/configdefault" + "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/kubeapiserver/authorizer" + kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy" +) + +func GetOpenshiftConfig(openshiftConfigFile string) (*kubecontrolplanev1.KubeAPIServerConfig, error) { + // try to decode into our new types first. right now there is no validation, no file path resolution. this unsticks the operator to start. 
+ // TODO add those things + configContent, err := ioutil.ReadFile(openshiftConfigFile) + if err != nil { + return nil, err + } + scheme := runtime.NewScheme() + utilruntime.Must(kubecontrolplanev1.Install(scheme)) + codecs := serializer.NewCodecFactory(scheme) + obj, err := runtime.Decode(codecs.UniversalDecoder(kubecontrolplanev1.GroupVersion, configv1.GroupVersion, osinv1.GroupVersion), configContent) + if err != nil { + + return nil, err + } + + // Resolve relative to CWD + absoluteConfigFile, err := api.MakeAbs(openshiftConfigFile, "") + if err != nil { + return nil, err + } + configFileLocation := path.Dir(absoluteConfigFile) + + config := obj.(*kubecontrolplanev1.KubeAPIServerConfig) + if err := helpers.ResolvePaths(configdefault.GetKubeAPIServerConfigFileReferences(config), configFileLocation); err != nil { + return nil, err + } + configdefault.SetRecommendedKubeAPIServerConfigDefaults(config) + configdefault.ResolveDirectoriesForSATokenVerification(config) + + return config, nil +} + +func ForceGlobalInitializationForOpenShift() { + // This allows to move crqs, sccs, and rbrs to CRD + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/quota.openshift.io/v1/clusterresourcequotas") + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/security.openshift.io/v1/securitycontextconstraints") + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/authorization.openshift.io/v1/rolebindingrestrictions") + aggregatorapiserver.AddAlwaysLocalDelegateGroupResource(schema.GroupResource{Group: "authorization.openshift.io", Resource: "rolebindingrestrictions"}) + + // This allows the CRD registration to avoid fighting with the APIService from the operator + aggregatorapiserver.AddOverlappingGroupVersion(schema.GroupVersion{Group: "authorization.openshift.io", Version: "v1"}) + + // Allow privileged containers + capabilities.Initialize(capabilities.Capabilities{ + AllowPrivileged: true, + PrivilegedSources: capabilities.PrivilegedSources{ + HostNetworkSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + HostPIDSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + HostIPCSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + }, + }) + + // add permissions we require on our kube-apiserver + // TODO, we should scrub these out + bootstrappolicy.ClusterRoles = bootstrappolicy.OpenshiftClusterRoles + bootstrappolicy.ClusterRoleBindings = bootstrappolicy.OpenshiftClusterRoleBindings + + // we need to have the authorization chain place something before system:masters + // SkipSystemMastersAuthorizer disable implicitly added system/master authz, and turn it into another authz mode "SystemMasters", to be added via authorization-mode + authorizer.SkipSystemMastersAuthorizer() +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/flags.go b/openshift-kube-apiserver/openshiftkubeapiserver/flags.go new file mode 100644 index 0000000000000..a77253141fe29 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/flags.go @@ -0,0 +1,112 @@ +package openshiftkubeapiserver + +import ( + "fmt" + "io/ioutil" + "net" + "strings" + + configv1 "github.com/openshift/api/config/v1" + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + "github.com/openshift/apiserver-library-go/pkg/configflags" + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + apiserverv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" +) + +func 
ConfigToFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) ([]string, error) { + args := unmaskArgs(kubeAPIServerConfig.APIServerArguments) + + host, portString, err := net.SplitHostPort(kubeAPIServerConfig.ServingInfo.BindAddress) + if err != nil { + return nil, err + } + + admissionFlags, err := admissionFlags(kubeAPIServerConfig.AdmissionConfig) + if err != nil { + return nil, err + } + for flag, value := range admissionFlags { + configflags.SetIfUnset(args, flag, value...) + } + for flag, value := range configflags.AuditFlags(&kubeAPIServerConfig.AuditConfig, configflags.ArgsWithPrefix(args, "audit-")) { + configflags.SetIfUnset(args, flag, value...) + } + configflags.SetIfUnset(args, "bind-address", host) + configflags.SetIfUnset(args, "cors-allowed-origins", kubeAPIServerConfig.CORSAllowedOrigins...) + configflags.SetIfUnset(args, "secure-port", portString) + configflags.SetIfUnset(args, "service-account-key-file", kubeAPIServerConfig.ServiceAccountPublicKeyFiles...) + configflags.SetIfUnset(args, "service-cluster-ip-range", kubeAPIServerConfig.ServicesSubnet) + configflags.SetIfUnset(args, "tls-cipher-suites", kubeAPIServerConfig.ServingInfo.CipherSuites...) + configflags.SetIfUnset(args, "tls-min-version", kubeAPIServerConfig.ServingInfo.MinTLSVersion) + configflags.SetIfUnset(args, "tls-sni-cert-key", sniCertKeys(kubeAPIServerConfig.ServingInfo.NamedCertificates)...) + + return configflags.ToFlagSlice(args), nil +} + +func admissionFlags(admissionConfig configv1.AdmissionConfig) (map[string][]string, error) { + args := map[string][]string{} + + upstreamAdmissionConfig, err := ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(admissionConfig.PluginConfig) + if err != nil { + return nil, err + } + configBytes, err := helpers.WriteYAML(upstreamAdmissionConfig, apiserverv1alpha1.AddToScheme) + if err != nil { + return nil, err + } + + tempFile, err := ioutil.TempFile("", "kubeapiserver-admission-config.yaml") + if err != nil { + return nil, err + } + if _, err := tempFile.Write(configBytes); err != nil { + return nil, err + } + tempFile.Close() + + configflags.SetIfUnset(args, "admission-control-config-file", tempFile.Name()) + + return args, nil +} + +func sniCertKeys(namedCertificates []configv1.NamedCertificate) []string { + args := []string{} + for _, nc := range namedCertificates { + names := "" + if len(nc.Names) > 0 { + names = ":" + strings.Join(nc.Names, ",") + } + args = append(args, fmt.Sprintf("%s,%s%s", nc.CertFile, nc.KeyFile, names)) + } + return args +} + +func unmaskArgs(args map[string]kubecontrolplanev1.Arguments) map[string][]string { + ret := map[string][]string{} + for key, slice := range args { + for _, val := range slice { + ret[key] = append(ret[key], val) + } + } + return ret +} + +func ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(in map[string]configv1.AdmissionPluginConfig) (*apiserverv1alpha1.AdmissionConfiguration, error) { + ret := &apiserverv1alpha1.AdmissionConfiguration{} + + for _, pluginName := range sets.StringKeySet(in).List() { + kubeConfig := apiserverv1alpha1.AdmissionPluginConfiguration{ + Name: pluginName, + Path: in[pluginName].Location, + Configuration: &runtime.Unknown{ + Raw: in[pluginName].Configuration.Raw, + }, + } + + ret.Plugins = append(ret.Plugins, kubeConfig) + } + + return ret, nil +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go b/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go new file mode 100644 index 0000000000000..3241b9b432981 --- /dev/null +++ 
b/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go @@ -0,0 +1,26 @@ +package openshiftkubeapiserver + +import ( + "testing" + + "github.com/openshift/api/config/v1" +) + +func TestSNICertKeys(t *testing.T) { + testCases := []struct { + names []string + expected string + }{ + {names: []string{"foo"}, expected: "secret.crt,secret.key:foo"}, + {names: []string{"foo", "bar"}, expected: "secret.crt,secret.key:foo,bar"}, + {expected: "secret.crt,secret.key"}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + result := sniCertKeys([]v1.NamedCertificate{{Names: tc.names, CertInfo: v1.CertInfo{CertFile: "secret.crt", KeyFile: "secret.key"}}}) + if len(result) != 1 || result[0] != tc.expected { + t.Errorf("expected: %v, actual: %v", []string{tc.expected}, result) + } + }) + } +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/patch.go b/openshift-kube-apiserver/openshiftkubeapiserver/patch.go new file mode 100644 index 0000000000000..39172be5b88d0 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/patch.go @@ -0,0 +1,181 @@ +package openshiftkubeapiserver + +import ( + "time" + + "k8s.io/kubernetes/openshift-kube-apiserver/enablement" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/quota/v1/generic" + genericapiserver "k8s.io/apiserver/pkg/server" + clientgoinformers "k8s.io/client-go/informers" + corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/quota/v1/install" + + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy" + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators" + "github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission" + configclient "github.com/openshift/client-go/config/clientset/versioned" + configv1informer "github.com/openshift/client-go/config/informers/externalversions" + quotaclient "github.com/openshift/client-go/quota/clientset/versioned" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions" + quotav1informer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + securityv1client "github.com/openshift/client-go/security/clientset/versioned" + securityv1informer "github.com/openshift/client-go/security/informers/externalversions" + userclient "github.com/openshift/client-go/user/clientset/versioned" + userinformer "github.com/openshift/client-go/user/informers/externalversions" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "github.com/openshift/library-go/pkg/apiserver/apiserverconfig" + "github.com/openshift/library-go/pkg/quota/clusterquotamapping" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers/usercache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managednode" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv" +) + +func OpenShiftKubeAPIServerConfigPatch(genericConfig *genericapiserver.Config, kubeInformers clientgoinformers.SharedInformerFactory, pluginInitializers *[]admission.PluginInitializer) error { + if !enablement.IsOpenShift() { + return nil + } + + openshiftInformers, err := 
newInformers(genericConfig.LoopbackClientConfig) + if err != nil { + return err + } + + // AUTHORIZER + genericConfig.RequestInfoResolver = apiserverconfig.OpenshiftRequestInfoResolver() + // END AUTHORIZER + + // Inject OpenShift API long running endpoints (like for binary builds). + // TODO: We should disable the timeout code for aggregated endpoints as this can cause problems when upstream add additional endpoints. + genericConfig.LongRunningFunc = apiserverconfig.IsLongRunningRequest + + // ADMISSION + clusterQuotaMappingController := newClusterQuotaMappingController(kubeInformers.Core().V1().Namespaces(), openshiftInformers.OpenshiftQuotaInformers.Quota().V1().ClusterResourceQuotas()) + genericConfig.AddPostStartHookOrDie("quota.openshift.io-clusterquotamapping", func(context genericapiserver.PostStartHookContext) error { + go clusterQuotaMappingController.Run(5, context.StopCh) + return nil + }) + + *pluginInitializers = append(*pluginInitializers, + imagepolicy.NewInitializer(imagereferencemutators.KubeImageMutators{}, enablement.OpenshiftConfig().ImagePolicyConfig.InternalRegistryHostname), + restrictusers.NewInitializer(openshiftInformers.getOpenshiftUserInformers()), + sccadmission.NewInitializer(openshiftInformers.getOpenshiftSecurityInformers().Security().V1().SecurityContextConstraints()), + clusterresourcequota.NewInitializer( + openshiftInformers.getOpenshiftQuotaInformers().Quota().V1().ClusterResourceQuotas(), + clusterQuotaMappingController.GetClusterQuotaMapper(), + generic.NewRegistry(install.NewQuotaConfigurationForAdmission().Evaluators()), + ), + nodeenv.NewInitializer(enablement.OpenshiftConfig().ProjectConfig.DefaultNodeSelector), + admissionrestconfig.NewInitializer(*rest.CopyConfig(genericConfig.LoopbackClientConfig)), + managementcpusoverride.NewInitializer(openshiftInformers.getOpenshiftInfraInformers().Config().V1().Infrastructures()), + managednode.NewInitializer(openshiftInformers.getOpenshiftInfraInformers().Config().V1().Infrastructures()), + ) + // END ADMISSION + + // HANDLER CHAIN (with oauth server and web console) + genericConfig.BuildHandlerChainFunc, err = BuildHandlerChain(enablement.OpenshiftConfig().ConsolePublicURL, enablement.OpenshiftConfig().AuthConfig.OAuthMetadataFile) + if err != nil { + return err + } + // END HANDLER CHAIN + + openshiftAPIServiceReachabilityCheck := newOpenshiftAPIServiceReachabilityCheck() + oauthAPIServiceReachabilityCheck := newOAuthPIServiceReachabilityCheck() + genericConfig.ReadyzChecks = append(genericConfig.ReadyzChecks, openshiftAPIServiceReachabilityCheck, oauthAPIServiceReachabilityCheck) + + genericConfig.AddPostStartHookOrDie("openshift.io-startkubeinformers", func(context genericapiserver.PostStartHookContext) error { + go openshiftInformers.Start(context.StopCh) + return nil + }) + genericConfig.AddPostStartHookOrDie("openshift.io-openshift-apiserver-reachable", func(context genericapiserver.PostStartHookContext) error { + go openshiftAPIServiceReachabilityCheck.checkForConnection(context) + return nil + }) + genericConfig.AddPostStartHookOrDie("openshift.io-oauth-apiserver-reachable", func(context genericapiserver.PostStartHookContext) error { + go oauthAPIServiceReachabilityCheck.checkForConnection(context) + return nil + }) + enablement.AppendPostStartHooksOrDie(genericConfig) + + return nil +} + +// newInformers is only exposed for the build's integration testing until it can be fixed more appropriately. 
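For illustration, the openshift-apiserver and oauth-apiserver reachability checks registered above share one pattern: the readyz check keeps failing until a goroutine started from a post-start hook either reaches the aggregated endpoint or gives up after 60 seconds (the real implementation is in sdn_readyz_wait.go later in this patch). Below is a minimal standalone sketch of that pattern using only the standard library; the names dependencyCheck and waitForDependency are illustrative and not part of the patch.

package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

// dependencyCheck reports failure until its background probe signals completion.
type dependencyCheck struct {
	done chan struct{}
}

func (c *dependencyCheck) Check(_ *http.Request) error {
	select {
	case <-c.done:
		return nil
	default:
		return errors.New("check is not yet complete")
	}
}

// waitForDependency probes an endpoint and closes done on success or when the
// deadline passes, mirroring the "report ready anyway after 60 seconds" behaviour
// described above.
func (c *dependencyCheck) waitForDependency(url string, deadline time.Duration) {
	defer close(c.done)
	client := &http.Client{Timeout: time.Second}
	end := time.Now().Add(deadline)
	for time.Now().Before(end) {
		if resp, err := client.Get(url); err == nil {
			resp.Body.Close() // any HTTP response at all means the dependency is reachable
			return
		}
		time.Sleep(time.Second)
	}
	// deadline reached: give up so a broken aggregated apiserver cannot wedge readiness forever
}

func main() {
	check := &dependencyCheck{done: make(chan struct{})}
	go check.waitForDependency("https://127.0.0.1:8443/healthz", 60*time.Second)
	fmt.Println(check.Check(nil)) // reports "check is not yet complete" until the probe finishes
}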
+func newInformers(loopbackClientConfig *rest.Config) (*kubeAPIServerInformers, error) { + // ClusterResourceQuota is served using CRD resource any status update must use JSON + jsonLoopbackClientConfig := rest.CopyConfig(loopbackClientConfig) + jsonLoopbackClientConfig.ContentConfig.AcceptContentTypes = "application/json" + jsonLoopbackClientConfig.ContentConfig.ContentType = "application/json" + + quotaClient, err := quotaclient.NewForConfig(jsonLoopbackClientConfig) + if err != nil { + return nil, err + } + securityClient, err := securityv1client.NewForConfig(jsonLoopbackClientConfig) + if err != nil { + return nil, err + } + userClient, err := userclient.NewForConfig(loopbackClientConfig) + if err != nil { + return nil, err + } + configClient, err := configclient.NewForConfig(loopbackClientConfig) + if err != nil { + return nil, err + } + + // TODO find a single place to create and start informers. During the 1.7 rebase this will come more naturally in a config object, + // before then we should try to eliminate our direct to storage access. It's making us do weird things. + const defaultInformerResyncPeriod = 10 * time.Minute + + ret := &kubeAPIServerInformers{ + OpenshiftQuotaInformers: quotainformer.NewSharedInformerFactory(quotaClient, defaultInformerResyncPeriod), + OpenshiftSecurityInformers: securityv1informer.NewSharedInformerFactory(securityClient, defaultInformerResyncPeriod), + OpenshiftUserInformers: userinformer.NewSharedInformerFactory(userClient, defaultInformerResyncPeriod), + OpenshiftConfigInformers: configv1informer.NewSharedInformerFactory(configClient, defaultInformerResyncPeriod), + } + if err := ret.OpenshiftUserInformers.User().V1().Groups().Informer().AddIndexers(cache.Indexers{ + usercache.ByUserIndexName: usercache.ByUserIndexKeys, + }); err != nil { + return nil, err + } + + return ret, nil +} + +type kubeAPIServerInformers struct { + OpenshiftQuotaInformers quotainformer.SharedInformerFactory + OpenshiftSecurityInformers securityv1informer.SharedInformerFactory + OpenshiftUserInformers userinformer.SharedInformerFactory + OpenshiftConfigInformers configv1informer.SharedInformerFactory +} + +func (i *kubeAPIServerInformers) getOpenshiftQuotaInformers() quotainformer.SharedInformerFactory { + return i.OpenshiftQuotaInformers +} +func (i *kubeAPIServerInformers) getOpenshiftSecurityInformers() securityv1informer.SharedInformerFactory { + return i.OpenshiftSecurityInformers +} +func (i *kubeAPIServerInformers) getOpenshiftUserInformers() userinformer.SharedInformerFactory { + return i.OpenshiftUserInformers +} +func (i *kubeAPIServerInformers) getOpenshiftInfraInformers() configv1informer.SharedInformerFactory { + return i.OpenshiftConfigInformers +} + +func (i *kubeAPIServerInformers) Start(stopCh <-chan struct{}) { + i.OpenshiftQuotaInformers.Start(stopCh) + i.OpenshiftSecurityInformers.Start(stopCh) + i.OpenshiftUserInformers.Start(stopCh) + i.OpenshiftConfigInformers.Start(stopCh) +} + +func newClusterQuotaMappingController(nsInternalInformer corev1informers.NamespaceInformer, clusterQuotaInformer quotav1informer.ClusterResourceQuotaInformer) *clusterquotamapping.ClusterQuotaMappingController { + return clusterquotamapping.NewClusterQuotaMappingController(nsInternalInformer, clusterQuotaInformer) +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go b/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go new file mode 100644 index 0000000000000..804116c1efa1d --- /dev/null +++ 
b/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go @@ -0,0 +1,97 @@ +package openshiftkubeapiserver + +import ( + "net/http" + "strings" + + authenticationv1 "k8s.io/api/authentication/v1" + genericapiserver "k8s.io/apiserver/pkg/server" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "github.com/openshift/library-go/pkg/apiserver/httprequest" +) + +// TODO switch back to taking a kubeapiserver config. For now make it obviously safe for 3.11 +func BuildHandlerChain(consolePublicURL string, oauthMetadataFile string) (func(apiHandler http.Handler, kc *genericapiserver.Config) http.Handler, error) { + // load the OAuth metadata up front, while we can still return an error + oAuthMetadata := []byte{} + if len(oauthMetadataFile) > 0 { + var err error + oAuthMetadata, err = loadOAuthMetadataFile(oauthMetadataFile) + if err != nil { + return nil, err + } + } + + return func(apiHandler http.Handler, genericConfig *genericapiserver.Config) http.Handler { + // well-known comes after the normal handling chain. This shows where to connect for oauth information + handler := withOAuthInfo(apiHandler, oAuthMetadata) + + // this is the normal kube handler chain + handler = genericapiserver.DefaultBuildHandlerChain(handler, genericConfig) + + // these handlers are all before the normal kube chain + handler = translateLegacyScopeImpersonation(handler) + + // redirects from / and /console to consolePublicURL if you're using a browser + handler = withConsoleRedirect(handler, consolePublicURL) + + return handler + }, + nil +} + +// withOAuthInfo serves the configured OAuth authorization server metadata at the +// well-known discovery endpoint so clients know where to connect for OAuth information +func withOAuthInfo(handler http.Handler, oAuthMetadata []byte) http.Handler { + if len(oAuthMetadata) == 0 { + return handler + } + + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.URL.Path != oauthMetadataEndpoint { + // Dispatch to the next handler + handler.ServeHTTP(w, req) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(oAuthMetadata) + }) +} + +// If we know the location of the asset server, redirect to it when / is requested +// and the Accept header supports text/html +func withConsoleRedirect(handler http.Handler, consolePublicURL string) http.Handler { + if len(consolePublicURL) == 0 { + return handler + } + + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if strings.HasPrefix(req.URL.Path, "/console") || + (req.URL.Path == "/" && httprequest.PrefersHTML(req)) { + http.Redirect(w, req, consolePublicURL, http.StatusFound) + return + } + // Dispatch to the next handler + handler.ServeHTTP(w, req) + }) +} + +// legacyImpersonateUserScopeHeader is the header name older servers were using +// just for scopes, so we need to translate it from clients that may still be +// using it. +const legacyImpersonateUserScopeHeader = "Impersonate-User-Scope" + +// translateLegacyScopeImpersonation is a filter that translates user scope impersonation for openshift into the equivalent kube headers. 
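To make the legacy scope translation concrete: a request that still carries the old Impersonate-User-Scope header gets each of its values copied onto the upstream impersonation "extra" header before the kube chain runs. The snippet below is a small illustrative sketch, assuming that ScopesKey resolves to "scopes.authorization.openshift.io" and ImpersonateUserExtraHeaderPrefix to "Impersonate-Extra-"; the string literals stand in for those constants and are not pulled from this patch.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Incoming request still using the legacy OpenShift header.
	header := http.Header{}
	header.Add("Impersonate-User-Scope", "user:info")

	// The filter copies every legacy scope value onto the upstream impersonation
	// "extra" header, so the kube authorizer sees it as a scope restriction.
	const translated = "Impersonate-Extra-" + "scopes.authorization.openshift.io"
	for _, scope := range header["Impersonate-User-Scope"] {
		header[translated] = append(header[translated], scope)
	}

	fmt.Println(header[translated]) // [user:info]
}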
+func translateLegacyScopeImpersonation(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + for _, scope := range req.Header[legacyImpersonateUserScopeHeader] { + req.Header[authenticationv1.ImpersonateUserExtraHeaderPrefix+authorizationv1.ScopesKey] = + append(req.Header[authenticationv1.ImpersonateUserExtraHeaderPrefix+authorizationv1.ScopesKey], scope) + } + + handler.ServeHTTP(w, req) + }) +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go b/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go new file mode 100644 index 0000000000000..59f5353d29528 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go @@ -0,0 +1,146 @@ +package openshiftkubeapiserver + +import ( + gocontext "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httputil" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" +) + +func newOpenshiftAPIServiceReachabilityCheck() *aggregatedAPIServiceAvailabilityCheck { + return newAggregatedAPIServiceReachabilityCheck("openshift-apiserver", "api") +} + +func newOAuthPIServiceReachabilityCheck() *aggregatedAPIServiceAvailabilityCheck { + return newAggregatedAPIServiceReachabilityCheck("openshift-oauth-apiserver", "api") +} + +// if the API service is not found, then this check returns quickly. +// if the endpoint is not accessible within 60 seconds, we report ready no matter what +// otherwise, wait for up to 60 seconds to be able to reach the apiserver +func newAggregatedAPIServiceReachabilityCheck(namespace, service string) *aggregatedAPIServiceAvailabilityCheck { + return &aggregatedAPIServiceAvailabilityCheck{ + done: make(chan struct{}), + namespace: namespace, + serviceName: service, + } +} + +type aggregatedAPIServiceAvailabilityCheck struct { + // done indicates that this check is complete (success or failure) and the check should return true + done chan struct{} + + // namespace is the namespace hosting the service for the aggregated api + namespace string + // serviceName is used to get a list of endpoints to directly dial + serviceName string +} + +func (c *aggregatedAPIServiceAvailabilityCheck) Name() string { + return fmt.Sprintf("%s-%s-available", c.serviceName, c.namespace) +} + +func (c *aggregatedAPIServiceAvailabilityCheck) Check(req *http.Request) error { + select { + case <-c.done: + return nil + default: + return fmt.Errorf("check is not yet complete") + } +} + +func (c *aggregatedAPIServiceAvailabilityCheck) checkForConnection(context genericapiserver.PostStartHookContext) { + defer utilruntime.HandleCrash() + + reachedAggregatedAPIServer := make(chan struct{}) + noAggregatedAPIServer := make(chan struct{}) + waitUntilCh := make(chan struct{}) + defer func() { + close(waitUntilCh) // this stops the endpoint check + close(c.done) // once this method is done, the ready check should return true + }() + start := time.Now() + + kubeClient, err := kubernetes.NewForConfig(context.LoopbackClientConfig) + if err != nil { + // shouldn't happen. this means the loopback config didn't work. + panic(err) + } + + // Start a thread which repeatedly tries to connect to any aggregated apiserver endpoint. + // 1. 
if the aggregated apiserver endpoint doesn't exist, logs a warning and reports ready + // 2. if a connection cannot be made, after 60 seconds logs an error and reports ready -- this avoids a rebootstrapping cycle + // 3. as soon as a connection can be made, logs a time to be ready and reports ready. + go func() { + defer utilruntime.HandleCrash() + + client := http.Client{ + Transport: &http.Transport{ + // since any http return code satisfies us, we don't bother to send credentials. + // we don't care about someone faking a response and we aren't sending credentials, so we don't check the server CA + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + Timeout: 1 * time.Second, // these should all be very fast. if none work, we continue anyway. + } + + wait.PollImmediateUntil(1*time.Second, func() (bool, error) { + ctx := gocontext.TODO() + openshiftEndpoints, err := kubeClient.CoreV1().Endpoints(c.namespace).Get(ctx, c.serviceName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // if we have no aggregated apiserver endpoint, we have no reason to wait + klog.Warningf("%s.%s.svc endpoints were not found", c.serviceName, c.namespace) + close(noAggregatedAPIServer) + return true, nil + } + if err != nil { + utilruntime.HandleError(err) + return false, nil + } + for _, subset := range openshiftEndpoints.Subsets { + for _, address := range subset.Addresses { + url := fmt.Sprintf("https://%v", net.JoinHostPort(address.IP, "8443")) + resp, err := client.Get(url) + if err == nil { // any http response is fine. it means that we made contact + response, dumpErr := httputil.DumpResponse(resp, true) + klog.V(4).Infof("reached to connect to %q: %v\n%v", url, dumpErr, string(response)) + close(reachedAggregatedAPIServer) + resp.Body.Close() + return true, nil + } + klog.V(2).Infof("failed to connect to %q: %v", url, err) + } + } + + return false, nil + }, waitUntilCh) + }() + + select { + case <-time.After(60 * time.Second): + // if we timeout, always return ok so that we can start from a case where all kube-apiservers are down and the SDN isn't coming up + utilruntime.HandleError(fmt.Errorf("%s never reached apiserver", c.Name())) + return + case <-context.StopCh: + utilruntime.HandleError(fmt.Errorf("%s interrupted", c.Name())) + return + case <-noAggregatedAPIServer: + utilruntime.HandleError(fmt.Errorf("%s did not find an %s endpoint", c.Name(), c.namespace)) + return + + case <-reachedAggregatedAPIServer: + end := time.Now() + klog.Infof("reached %s via SDN after %v milliseconds", c.namespace, end.Sub(start).Milliseconds()) + return + } +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go b/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go new file mode 100644 index 0000000000000..8b34da7aa3203 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go @@ -0,0 +1,57 @@ +package openshiftkubeapiserver + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + + "github.com/openshift/library-go/pkg/oauth/oauthdiscovery" +) + +const ( + // Discovery endpoint for OAuth 2.0 Authorization Server Metadata + // See IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + oauthMetadataEndpoint = "/.well-known/oauth-authorization-server" +) + +func validateURL(urlString string) error { + urlObj, err := url.Parse(urlString) + if err != nil { + return fmt.Errorf("%q is an invalid URL: %v", urlString, err) + } + if len(urlObj.Scheme) == 0 { + return fmt.Errorf("must contain a valid 
scheme") + } + if len(urlObj.Host) == 0 { + return fmt.Errorf("must contain a valid host") + } + return nil +} + +func loadOAuthMetadataFile(metadataFile string) ([]byte, error) { + data, err := ioutil.ReadFile(metadataFile) + if err != nil { + return nil, fmt.Errorf("unable to read External OAuth Metadata file: %v", err) + } + + oauthMetadata := &oauthdiscovery.OauthAuthorizationServerMetadata{} + if err := json.Unmarshal(data, oauthMetadata); err != nil { + return nil, fmt.Errorf("unable to decode External OAuth Metadata file: %v", err) + } + + if err := validateURL(oauthMetadata.Issuer); err != nil { + return nil, fmt.Errorf("error validating External OAuth Metadata Issuer field: %v", err) + } + + if err := validateURL(oauthMetadata.AuthorizationEndpoint); err != nil { + return nil, fmt.Errorf("error validating External OAuth Metadata AuthorizationEndpoint field: %v", err) + } + + if err := validateURL(oauthMetadata.TokenEndpoint); err != nil { + return nil, fmt.Errorf("error validating External OAuth Metadata TokenEndpoint field: %v", err) + } + + return data, nil +}