diff --git a/assets/alertmanager/service-monitor.yaml b/assets/alertmanager/service-monitor.yaml index a8e9aadd24..7e86e1be71 100644 --- a/assets/alertmanager/service-monitor.yaml +++ b/assets/alertmanager/service-monitor.yaml @@ -16,6 +16,8 @@ spec: scheme: https tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: alertmanager-main selector: matchLabels: diff --git a/assets/cluster-monitoring-operator/metrics-client-ca.yaml b/assets/cluster-monitoring-operator/metrics-client-ca.yaml new file mode 100644 index 0000000000..6e47d9bce4 --- /dev/null +++ b/assets/cluster-monitoring-operator/metrics-client-ca.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +data: {} +kind: ConfigMap +metadata: + name: metrics-client-ca + namespace: openshift-monitoring diff --git a/assets/cluster-monitoring-operator/metrics-client-certs.yaml b/assets/cluster-monitoring-operator/metrics-client-certs.yaml new file mode 100644 index 0000000000..5dbf7b8b53 --- /dev/null +++ b/assets/cluster-monitoring-operator/metrics-client-certs.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: {} +kind: Secret +metadata: + name: metrics-client-certs + namespace: openshift-monitoring +type: Opaque diff --git a/assets/control-plane/service-monitor-kubelet.yaml b/assets/control-plane/service-monitor-kubelet.yaml index 994f395feb..6122883d7d 100644 --- a/assets/control-plane/service-monitor-kubelet.yaml +++ b/assets/control-plane/service-monitor-kubelet.yaml @@ -53,7 +53,9 @@ spec: scrapeTimeout: 30s tlsConfig: caFile: /etc/prometheus/configmaps/kubelet-serving-ca-bundle/ca-bundle.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt insecureSkipVerify: false + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token honorLabels: true honorTimestamps: false @@ -77,7 +79,9 @@ spec: scrapeTimeout: 30s tlsConfig: caFile: /etc/prometheus/configmaps/kubelet-serving-ca-bundle/ca-bundle.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt insecureSkipVerify: false + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token honorLabels: true interval: 30s @@ -91,7 +95,9 @@ spec: scrapeTimeout: 30s tlsConfig: caFile: /etc/prometheus/configmaps/kubelet-serving-ca-bundle/ca-bundle.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt insecureSkipVerify: false + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key - interval: 30s metricRelabelings: - action: drop diff --git a/assets/grafana/service-monitor.yaml b/assets/grafana/service-monitor.yaml index 1b08a01e35..8c796bbd1d 100644 --- a/assets/grafana/service-monitor.yaml +++ b/assets/grafana/service-monitor.yaml @@ -16,6 +16,8 @@ spec: scheme: https tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: server-name-replaced-at-runtime selector: matchLabels: diff --git a/assets/kube-state-metrics/deployment.yaml b/assets/kube-state-metrics/deployment.yaml index 00f6d3c141..ff4261a879 100644 --- a/assets/kube-state-metrics/deployment.yaml +++ b/assets/kube-state-metrics/deployment.yaml @@ -65,6 +65,7 @@ spec: - --upstream=http://127.0.0.1:8081/ - 
--tls-cert-file=/etc/tls/private/tls.crt - --tls-private-key-file=/etc/tls/private/tls.key + - --client-ca-file=/etc/tls/client/client-ca.crt image: quay.io/brancz/kube-rbac-proxy:v0.9.0 name: kube-rbac-proxy-main ports: @@ -80,6 +81,9 @@ spec: - mountPath: /etc/tls/private name: kube-state-metrics-tls readOnly: false + - mountPath: /etc/tls/client + name: metrics-client-ca + readOnly: false - args: - --logtostderr - --secure-listen-address=:9443 @@ -87,6 +91,7 @@ spec: - --upstream=http://127.0.0.1:8082/ - --tls-cert-file=/etc/tls/private/tls.crt - --tls-private-key-file=/etc/tls/private/tls.key + - --client-ca-file=/etc/tls/client/client-ca.crt image: quay.io/brancz/kube-rbac-proxy:v0.9.0 name: kube-rbac-proxy-self ports: @@ -102,6 +107,9 @@ spec: - mountPath: /etc/tls/private name: kube-state-metrics-tls readOnly: false + - mountPath: /etc/tls/client + name: metrics-client-ca + readOnly: false nodeSelector: kubernetes.io/os: linux priorityClassName: system-cluster-critical @@ -113,3 +121,6 @@ spec: - name: kube-state-metrics-tls secret: secretName: kube-state-metrics-tls + - configMap: + name: metrics-client-ca + name: metrics-client-ca diff --git a/assets/kube-state-metrics/service-monitor.yaml b/assets/kube-state-metrics/service-monitor.yaml index 845954c08c..7065d7d48a 100644 --- a/assets/kube-state-metrics/service-monitor.yaml +++ b/assets/kube-state-metrics/service-monitor.yaml @@ -24,6 +24,8 @@ spec: scrapeTimeout: 1m tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: server-name-replaced-at-runtime - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token interval: 1m diff --git a/assets/node-exporter/daemonset.yaml b/assets/node-exporter/daemonset.yaml index 834cae1166..455529bbe7 100644 --- a/assets/node-exporter/daemonset.yaml +++ b/assets/node-exporter/daemonset.yaml @@ -63,6 +63,7 @@ spec: - --upstream=http://127.0.0.1:9100/ - --tls-cert-file=/etc/tls/private/tls.crt - --tls-private-key-file=/etc/tls/private/tls.key + - --client-ca-file=/etc/tls/client/client-ca.crt env: - name: IP valueFrom: @@ -87,6 +88,9 @@ spec: - mountPath: /etc/tls/private name: node-exporter-tls readOnly: false + - mountPath: /etc/tls/client + name: metrics-client-ca + readOnly: false hostNetwork: true hostPID: true initContainers: @@ -139,6 +143,9 @@ spec: path: /var/log/wtmp type: File name: node-exporter-wtmp + - configMap: + name: metrics-client-ca + name: metrics-client-ca updateStrategy: rollingUpdate: maxUnavailable: 10% diff --git a/assets/node-exporter/service-monitor.yaml b/assets/node-exporter/service-monitor.yaml index 5884e1194e..842757c5d9 100644 --- a/assets/node-exporter/service-monitor.yaml +++ b/assets/node-exporter/service-monitor.yaml @@ -23,7 +23,9 @@ spec: scheme: https tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt insecureSkipVerify: false + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: server-name-replaced-at-runtime jobLabel: app.kubernetes.io/name selector: diff --git a/assets/prometheus-k8s/prometheus.yaml b/assets/prometheus-k8s/prometheus.yaml index 3aa52c78c3..3532f72051 100644 --- a/assets/prometheus-k8s/prometheus.yaml +++ b/assets/prometheus-k8s/prometheus.yaml @@ -198,6 +198,7 @@ spec: - prometheus-k8s-proxy - prometheus-k8s-thanos-sidecar-tls - 
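Taken together, the asset changes above set up mutual TLS for metrics scraping: each ServiceMonitor now presents the client certificate and key mounted from the metrics-client-certs secret (certFile/keyFile in tlsConfig), while every kube-rbac-proxy sidecar gains a --client-ca-file to verify those certificates. Below is a minimal Go sketch of the client half, assuming the mount paths from the manifests; the target host, port, and URL are placeholders, not values from this patch:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Client certificate and key, as mounted from the metrics-client-certs secret.
	cert, err := tls.LoadX509KeyPair(
		"/etc/prometheus/secrets/metrics-client-certs/tls.crt",
		"/etc/prometheus/secrets/metrics-client-certs/tls.key",
	)
	if err != nil {
		panic(err)
	}

	// CA bundle used to verify the kube-rbac-proxy serving certificate,
	// mirroring the caFile setting in the ServiceMonitors.
	caPEM, err := ioutil.ReadFile("/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt")
	if err != nil {
		panic(err)
	}
	roots := x509.NewCertPool()
	roots.AppendCertsFromPEM(caPEM)

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert}, // presented to the proxy
				RootCAs:      roots,
				ServerName:   "alertmanager-main", // must match tlsConfig.serverName
			},
		},
	}

	// Placeholder target: any endpoint fronted by kube-rbac-proxy.
	resp, err := client.Get("https://alertmanager-main.openshift-monitoring.svc:9095/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```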
kube-rbac-proxy + - metrics-client-certs securityContext: fsGroup: 65534 runAsNonRoot: true diff --git a/assets/prometheus-k8s/service-monitor-thanos-sidecar.yaml b/assets/prometheus-k8s/service-monitor-thanos-sidecar.yaml index 4e4add90dc..77bfacc6d0 100644 --- a/assets/prometheus-k8s/service-monitor-thanos-sidecar.yaml +++ b/assets/prometheus-k8s/service-monitor-thanos-sidecar.yaml @@ -17,6 +17,8 @@ spec: scheme: https tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: prometheus-k8s-thanos-sidecar selector: matchLabels: diff --git a/assets/prometheus-k8s/service-monitor.yaml b/assets/prometheus-k8s/service-monitor.yaml index dec29fe0e4..f39c1ce7b3 100644 --- a/assets/prometheus-k8s/service-monitor.yaml +++ b/assets/prometheus-k8s/service-monitor.yaml @@ -16,6 +16,8 @@ spec: scheme: https tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: prometheus-k8s selector: matchLabels: diff --git a/assets/prometheus-operator/deployment.yaml b/assets/prometheus-operator/deployment.yaml index d4378cb206..ab8d2b4a64 100644 --- a/assets/prometheus-operator/deployment.yaml +++ b/assets/prometheus-operator/deployment.yaml @@ -60,6 +60,7 @@ spec: - --upstream=https://prometheus-operator.openshift-monitoring.svc:8080/ - --tls-cert-file=/etc/tls/private/tls.crt - --tls-private-key-file=/etc/tls/private/tls.key + - --client-ca-file=/etc/tls/client/client-ca.crt - --upstream-ca-file=/etc/configmaps/operator-cert-ca-bundle/service-ca.crt image: quay.io/brancz/kube-rbac-proxy:v0.9.0 name: kube-rbac-proxy @@ -79,6 +80,9 @@ spec: - mountPath: /etc/configmaps/operator-cert-ca-bundle name: operator-certs-ca-bundle readOnly: false + - mountPath: /etc/tls/client + name: metrics-client-ca + readOnly: false nodeSelector: kubernetes.io/os: linux node-role.kubernetes.io/master: "" @@ -96,3 +100,6 @@ spec: - configMap: name: operator-certs-ca-bundle name: operator-certs-ca-bundle + - configMap: + name: metrics-client-ca + name: metrics-client-ca diff --git a/assets/prometheus-operator/service-monitor.yaml b/assets/prometheus-operator/service-monitor.yaml index 7eeb84f900..3824565161 100644 --- a/assets/prometheus-operator/service-monitor.yaml +++ b/assets/prometheus-operator/service-monitor.yaml @@ -16,6 +16,8 @@ spec: scheme: https tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: server-name-replaced-at-runtime selector: matchLabels: diff --git a/assets/thanos-querier/service-monitor.yaml b/assets/thanos-querier/service-monitor.yaml index 435e3a8a80..80f2629a34 100644 --- a/assets/thanos-querier/service-monitor.yaml +++ b/assets/thanos-querier/service-monitor.yaml @@ -16,6 +16,8 @@ spec: scheme: https tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: server-name-replaced-at-runtime selector: matchLabels: diff --git a/assets/thanos-ruler/service-monitor.yaml b/assets/thanos-ruler/service-monitor.yaml index 0fc4709364..e8bc3bee4c 
100644 --- a/assets/thanos-ruler/service-monitor.yaml +++ b/assets/thanos-ruler/service-monitor.yaml @@ -13,6 +13,8 @@ spec: scheme: https tlsConfig: caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt + keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key serverName: server-name-replaced-at-runtime selector: matchLabels: diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 59f4b58c7f..0f8098c77b 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -203,7 +203,7 @@ func Main() int { wg, ctx := errgroup.WithContext(ctx) - wg.Go(func() error { return o.Run(ctx.Done()) }) + wg.Go(func() error { return o.Run(ctx) }) term := make(chan os.Signal) signal.Notify(term, os.Interrupt, syscall.SIGTERM) diff --git a/go.mod b/go.mod index 6edbd102a1..37fef69e3e 100644 --- a/go.mod +++ b/go.mod @@ -7,9 +7,9 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/gogo/protobuf v1.3.2 github.com/imdario/mergo v0.3.7 - github.com/openshift/api v0.0.0-20210225162315-bae60f47eed7 - github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 - github.com/openshift/library-go v0.0.0-20210113192829-cfbb3f4c80c2 + github.com/openshift/api v0.0.0-20210706092853-b63d499a70ce + github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 + github.com/openshift/library-go v0.0.0-20210720093535-f8ed43828870 github.com/pkg/errors v0.9.1 github.com/prometheus-operator/prometheus-operator v0.47.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.1 @@ -24,7 +24,7 @@ require ( k8s.io/apiserver v0.21.2 k8s.io/client-go v12.0.0+incompatible k8s.io/klog/v2 v2.8.0 - k8s.io/kube-aggregator v0.20.0 + k8s.io/kube-aggregator v0.21.1 k8s.io/metrics v0.19.4 ) diff --git a/go.sum b/go.sum index d8840661a7..10b2dc6a1b 100644 --- a/go.sum +++ b/go.sum @@ -207,7 +207,6 @@ github.com/cenkalti/backoff v1.0.0/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -235,7 +234,6 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -306,12 +304,10 @@ github.com/docker/docker 
v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20190731215715-7f13a5c99f4b/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -361,8 +357,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= -github.com/fsouza/go-dockerclient v0.0.0-20171004212419-da3951ba2e9e/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0= -github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -1022,15 +1016,14 @@ github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/openshift/api v0.0.0-20201214114959-164a2fb63b5f/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= -github.com/openshift/api v0.0.0-20210225162315-bae60f47eed7 h1:RvBqGKZN2FVrnXR0MzOaFdsveCyq/DOLTJrwqpKa8e0= -github.com/openshift/api v0.0.0-20210225162315-bae60f47eed7/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= -github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= -github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 h1:7NmjUkJtGHpMTE/n8ia6itbCdZ7eYuTCXKc/zsA7OSM= -github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49/go.mod h1:9/jG4I6sh+5QublJpZZ4Zs/P4/QCXMsQQ/K/058bSB8= -github.com/openshift/library-go v0.0.0-20210113192829-cfbb3f4c80c2 
h1:UAzyFzukxe6zvQKbORXtY3C3hyRkXYfPkDeJXV89DpU= -github.com/openshift/library-go v0.0.0-20210113192829-cfbb3f4c80c2/go.mod h1:udseDnqxn5ON8i+NBjDp00fBTK0JRu1/6Y6tf6EivDE= +github.com/openshift/api v0.0.0-20210521075222-e273a339932a/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs= +github.com/openshift/api v0.0.0-20210706092853-b63d499a70ce h1:TI9mXBgwq/coqN9NV85CbF1JOpXhvDSSAn33xdNw+zg= +github.com/openshift/api v0.0.0-20210706092853-b63d499a70ce/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs= +github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 h1:ZHRIMCFIJN1p9LsJt4HQ+akDrys4PrYnXzOWI5LK03I= +github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8r9mqDVsPb5td3NehsNOAWa4uiFkYEfVZioQ2gH0= +github.com/openshift/library-go v0.0.0-20210720093535-f8ed43828870 h1:xhtl3hJFfICWRPhLfu0xvX44rhR2Gf91LPRND2TdPPY= +github.com/openshift/library-go v0.0.0-20210720093535-f8ed43828870/go.mod h1:rln3LbFNOpENSvhmsfH7g/hqc58IF78+o96yAAp5mq0= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= @@ -1174,6 +1167,7 @@ github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:J github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -1289,8 +1283,6 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= @@ -1961,17 +1953,17 @@ k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= k8s.io/api 
v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= -k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= k8s.io/api v0.20.5/go.mod h1:FQjAceXnVaWDeov2YUWhOb6Yt+5UjErkp6UO3nczO1Y= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= k8s.io/apiextensions-apiserver v0.18.0-beta.2/go.mod h1:Hnrg5jx8/PbxRbUoqDGxtQkULjwx8FDW4WYJaKNK+fk= k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= -k8s.io/apiextensions-apiserver v0.20.0/go.mod h1:ZH+C33L2Bh1LY1+HphoRmN1IQVLTShVcTojivK3N9xg= k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= +k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE= k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= @@ -1985,18 +1977,18 @@ k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCk k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.5/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A= k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= -k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= +k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= k8s.io/apiserver v0.21.2 h1:vfGLD8biFXHzbcIEXyW3652lDwkV8tZEFJAaS2iuJlw= k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0= @@ -2005,15 +1997,15 @@ k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+ k8s.io/code-generator v0.18.0-beta.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/code-generator v0.19.4/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator 
v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks= k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= -k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA= k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= +k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4= k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2021,7 +2013,6 @@ k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -2037,8 +2028,8 @@ k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk= -k8s.io/kube-aggregator v0.20.0 h1:hXjICaaB1d1vRFGTNbOd0Agdn56rihxeGvS8zpuoWuE= -k8s.io/kube-aggregator v0.20.0/go.mod h1:3Is/gzzWmhhG/rA3CpA1+eVye87lreBQDFGcAGT7gzo= +k8s.io/kube-aggregator v0.21.1 h1:3pPRhOXZcJYjNDjPDizFx0G5//DArWKANZE03J5z8Ck= +k8s.io/kube-aggregator v0.21.1/go.mod h1:cAZ0n02IiSl57sQSHz4vvrz3upQRMbytOiZnpPJaQzQ= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= @@ -2072,11 +2063,10 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= -sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= +sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= diff --git a/jsonnet/alertmanager.libsonnet b/jsonnet/alertmanager.libsonnet index bb7c25d009..56333b5209 100644 --- a/jsonnet/alertmanager.libsonnet +++ b/jsonnet/alertmanager.libsonnet @@ -195,6 +195,8 @@ function(params) tlsConfig: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'alertmanager-main', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token', }, diff --git a/jsonnet/cluster-monitoring-operator.libsonnet b/jsonnet/cluster-monitoring-operator.libsonnet index 0a9859eebc..57af9d1f2e 100644 --- a/jsonnet/cluster-monitoring-operator.libsonnet +++ b/jsonnet/cluster-monitoring-operator.libsonnet @@ -53,6 +53,27 @@ function(params) { }, }, + metricsClientCerts: { + apiVersion: 'v1', + kind: 'Secret', + metadata: { + name: 'metrics-client-certs', + namespace: cfg.namespace, + }, + type: 'Opaque', + data: {}, + }, + + metricsClientCa: { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { + name: 'metrics-client-ca', + namespace: cfg.namespace, + }, + data: {}, + }, + service: { apiVersion: 'v1', kind: 'Service', @@ -93,6 +114,11 @@ function(params) { port: 'https', scheme: 'https', tlsConfig: { + // don't specify the certificate authentication for the operator since + // it itself creates the client CA copy in the namespace and therefore + // we cannot use it in the operator's kube-rbac-proxy config + // TODO: this could be fixed by using library-go's controller setup boilerplate + // code caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'server-name-replaced-at-runtime', }, @@ -181,6 +207,21 @@ function(params) { resources: ['poddisruptionbudgets'], verbs: ['create', 'get', 'update', 'delete'], }, + { + apiGroups: ['certificates.k8s.io'], + resources: ['certificatesigningrequests'], + verbs: ['create', 'get', 'list', 'watch', 'update', 'delete'], + }, + { + apiGroups: ['certificates.k8s.io'], + resources: ['certificatesigningrequests/approval', 'certificatesigningrequests/status'], + verbs: ['get', 'list', 'watch'], + }, + { + apiGroups: [''], + resources: ['events'], + verbs: ['create', 'patch', 'update'], + }, ], }, diff --git a/jsonnet/control-plane.libsonnet b/jsonnet/control-plane.libsonnet index 0f3aa237b0..bfc69bd82d 100644 --- a/jsonnet/control-plane.libsonnet +++ b/jsonnet/control-plane.libsonnet @@ -68,6 +68,8 @@ function(params) tlsConfig+: { caFile: '/etc/prometheus/configmaps/kubelet-serving-ca-bundle/ca-bundle.crt', insecureSkipVerify: false, + 
certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, } + { diff --git a/jsonnet/grafana.libsonnet b/jsonnet/grafana.libsonnet index aae121d0b7..f7ef241c6e 100644 --- a/jsonnet/grafana.libsonnet +++ b/jsonnet/grafana.libsonnet @@ -69,6 +69,8 @@ function(params) tlsConfig: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'server-name-replaced-at-runtime', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, }, ], diff --git a/jsonnet/kube-state-metrics.libsonnet b/jsonnet/kube-state-metrics.libsonnet index 2b9451cc53..275ad3883c 100644 --- a/jsonnet/kube-state-metrics.libsonnet +++ b/jsonnet/kube-state-metrics.libsonnet @@ -33,6 +33,8 @@ function(params) tlsConfig: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'server-name-replaced-at-runtime', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, // Drop the "instance" and "pod" labels since we're runinng only // one instance of kube-state-metrics. The "instance" label must be @@ -91,13 +93,21 @@ function(params) args+: [ '--tls-cert-file=/etc/tls/private/tls.crt', '--tls-private-key-file=/etc/tls/private/tls.key', + '--client-ca-file=/etc/tls/client/client-ca.crt', ], terminationMessagePolicy: 'FallbackToLogsOnError', - volumeMounts: [{ - mountPath: '/etc/tls/private', - name: tlsVolumeName, - readOnly: false, - }], + volumeMounts: [ + { + mountPath: '/etc/tls/private', + name: tlsVolumeName, + readOnly: false, + }, + { + mountPath: '/etc/tls/client', + name: 'metrics-client-ca', + readOnly: false, + }, + ], securityContext: {}, resources: { requests: { @@ -138,6 +148,12 @@ function(params) secretName: 'kube-state-metrics-tls', }, }, + { + name: 'metrics-client-ca', + configMap: { + name: 'metrics-client-ca', + }, + }, ], securityContext: {}, priorityClassName: 'system-cluster-critical', diff --git a/jsonnet/node-exporter.libsonnet b/jsonnet/node-exporter.libsonnet index 41ca48e4e9..46875baf51 100644 --- a/jsonnet/node-exporter.libsonnet +++ b/jsonnet/node-exporter.libsonnet @@ -30,6 +30,8 @@ function(params) tlsConfig+: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'server-name-replaced-at-runtime', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', insecureSkipVerify: false, }, }, @@ -118,13 +120,21 @@ function(params) args+: [ '--tls-cert-file=/etc/tls/private/tls.crt', '--tls-private-key-file=/etc/tls/private/tls.key', + '--client-ca-file=/etc/tls/client/client-ca.crt', ], terminationMessagePolicy: 'FallbackToLogsOnError', - volumeMounts: [{ - mountPath: '/etc/tls/private', - name: tlsVolumeName, - readOnly: false, - }], + volumeMounts: [ + { + mountPath: '/etc/tls/private', + name: tlsVolumeName, + readOnly: false, + }, + { + mountPath: '/etc/tls/client', + name: 'metrics-client-ca', + readOnly: false, + }, + ], resources: { requests: { memory: '15Mi', @@ -185,6 +195,12 @@ function(params) type: 'File', }, }, + { + name: 'metrics-client-ca', + configMap: { + name: 'metrics-client-ca', + }, + }, ], securityContext: {}, priorityClassName: 'system-cluster-critical', diff --git a/jsonnet/prometheus-adapter.libsonnet b/jsonnet/prometheus-adapter.libsonnet index 
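On the serving side, --client-ca-file points each kube-rbac-proxy at the CA that is copied into the metrics-client-ca ConfigMap. The sketch below is not kube-rbac-proxy's actual implementation, only a minimal illustration of the TLS mechanism the flag enables: verify a presented client certificate against that CA while still allowing certificate-less (bearer-token) requests through. Broadly, the proxy then maps the certificate's common name to a user for its SubjectAccessReview authorization.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// CA that signed the scrapers' client certificates, as mounted
	// from the metrics-client-ca ConfigMap.
	caPEM, err := ioutil.ReadFile("/etc/tls/client/client-ca.crt")
	if err != nil {
		log.Fatal(err)
	}
	clientCAs := x509.NewCertPool()
	clientCAs.AppendCertsFromPEM(caPEM)

	server := &http.Server{
		Addr: ":8443",
		TLSConfig: &tls.Config{
			ClientCAs: clientCAs,
			// Verify a client certificate when one is presented, but do not
			// require one, so token-based requests can still be served.
			ClientAuth: tls.VerifyClientCertIfGiven,
		},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if len(r.TLS.PeerCertificates) > 0 {
				w.Write([]byte("authenticated as " + r.TLS.PeerCertificates[0].Subject.CommonName))
				return
			}
			http.Error(w, "no client certificate", http.StatusUnauthorized)
		}),
	}
	// Serving cert/key, as mounted from the *-tls secrets.
	log.Fatal(server.ListenAndServeTLS("/etc/tls/private/tls.crt", "/etc/tls/private/tls.key"))
}
```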
4dd3bac7f3..c12722e5a6 100644 --- a/jsonnet/prometheus-adapter.libsonnet +++ b/jsonnet/prometheus-adapter.libsonnet @@ -59,6 +59,8 @@ function(params) caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'server-name-replaced-at-runtime', insecureSkipVerify: false, + // TODO: prometheus-adapter currently is a stock upstream aggregated api server. + // It does not support static authorization. }, }, super.endpoints diff --git a/jsonnet/prometheus-operator.libsonnet b/jsonnet/prometheus-operator.libsonnet index d61959936b..a91a1d04d1 100644 --- a/jsonnet/prometheus-operator.libsonnet +++ b/jsonnet/prometheus-operator.libsonnet @@ -59,6 +59,7 @@ function(params) '--upstream=https://prometheus-operator.openshift-monitoring.svc:8080/', '--tls-cert-file=/etc/tls/private/tls.crt', '--tls-private-key-file=/etc/tls/private/tls.key', + '--client-ca-file=/etc/tls/client/client-ca.crt', '--upstream-ca-file=/etc/configmaps/operator-cert-ca-bundle/service-ca.crt', ], terminationMessagePolicy: 'FallbackToLogsOnError', @@ -73,6 +74,11 @@ function(params) name: certsCAVolumeName, readOnly: false, }, + { + mountPath: '/etc/tls/client', + name: 'metrics-client-ca', + readOnly: false, + }, ], securityContext: {}, resources: { @@ -100,6 +106,12 @@ function(params) name: certsCAVolumeName, }, }, + { + name: 'metrics-client-ca', + configMap: { + name: 'metrics-client-ca', + }, + }, ], }, }, @@ -128,6 +140,8 @@ function(params) tlsConfig: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'server-name-replaced-at-runtime', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, }, ], diff --git a/jsonnet/prometheus.libsonnet b/jsonnet/prometheus.libsonnet index 056a70110d..b3a3f95655 100644 --- a/jsonnet/prometheus.libsonnet +++ b/jsonnet/prometheus.libsonnet @@ -249,6 +249,8 @@ function(params) tlsConfig: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'prometheus-k8s', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token', }, @@ -282,6 +284,8 @@ function(params) tlsConfig: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'prometheus-k8s-thanos-sidecar', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token', }, @@ -335,6 +339,7 @@ function(params) 'prometheus-k8s-proxy', 'prometheus-k8s-thanos-sidecar-tls', 'kube-rbac-proxy', + 'metrics-client-certs', ], configMaps: ['serving-certs-ca-bundle', 'kubelet-serving-ca-bundle'], probeNamespaceSelector: cfg.namespaceSelector, diff --git a/jsonnet/thanos-querier.libsonnet b/jsonnet/thanos-querier.libsonnet index e7697471f2..de188fe127 100644 --- a/jsonnet/thanos-querier.libsonnet +++ b/jsonnet/thanos-querier.libsonnet @@ -258,6 +258,8 @@ function(params) tlsConfig: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'server-name-replaced-at-runtime', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token', }, diff --git 
a/jsonnet/thanos-ruler.libsonnet b/jsonnet/thanos-ruler.libsonnet index 653abb734a..9019f65635 100644 --- a/jsonnet/thanos-ruler.libsonnet +++ b/jsonnet/thanos-ruler.libsonnet @@ -307,6 +307,8 @@ function(params) { tlsConfig: { caFile: '/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt', serverName: 'server-name-replaced-at-runtime', + certFile: '/etc/prometheus/secrets/metrics-client-certs/tls.crt', + keyFile: '/etc/prometheus/secrets/metrics-client-certs/tls.key', }, bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token', }, diff --git a/manifests/0000_50_cluster-monitoring-operator_02-role.yaml b/manifests/0000_50_cluster-monitoring-operator_02-role.yaml index 880ddbdb58..ddb98d38fb 100644 --- a/manifests/0000_50_cluster-monitoring-operator_02-role.yaml +++ b/manifests/0000_50_cluster-monitoring-operator_02-role.yaml @@ -139,6 +139,34 @@ rules: - get - update - delete +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - get + - list + - watch + - update + - delete +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + - certificatesigningrequests/status + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update - apiGroups: - authentication.k8s.io resources: diff --git a/pkg/manifests/manifests.go b/pkg/manifests/manifests.go index f42a54b0e1..2cf154c9a6 100644 --- a/pkg/manifests/manifests.go +++ b/pkg/manifests/manifests.go @@ -50,6 +50,7 @@ const ( sharedConfigMap = "monitoring-shared-config" htpasswdArg = "-htpasswd-file=/etc/proxy/htpasswd/auth" + clientCAArg = "--client-ca-file=/etc/tls/client/client-ca.crt" ) var ( @@ -186,6 +187,8 @@ var ( ClusterMonitoringEditUserWorkloadConfigRole = "cluster-monitoring-operator/user-workload-config-edit-role.yaml" ClusterMonitoringGrpcTLSSecret = "cluster-monitoring-operator/grpc-tls-secret.yaml" ClusterMonitoringOperatorPrometheusRule = "cluster-monitoring-operator/prometheus-rule.yaml" + ClusterMonitoringMetricsClientCertsSecret = "cluster-monitoring-operator/metrics-client-certs.yaml" + ClusterMonitoringMetricsClientCACM = "cluster-monitoring-operator/metrics-client-ca.yaml" TelemeterClientClusterRole = "telemeter-client/cluster-role.yaml" TelemeterClientClusterRoleBinding = "telemeter-client/cluster-role-binding.yaml" @@ -1367,6 +1370,33 @@ func (f *Factory) PrometheusK8s(host string, grpcTLS *v1.Secret, trustedCABundle p.Spec.Containers[i].Image = f.config.Images.KubeRbacProxy case "kube-rbac-proxy-thanos": p.Spec.Containers[i].Image = f.config.Images.KubeRbacProxy + + p.Spec.Containers[i].Args = append( + p.Spec.Containers[i].Args, + clientCAArg, + ) + + p.Spec.Containers[i].VolumeMounts = append( + p.Spec.Containers[i].VolumeMounts, + v1.VolumeMount{ + Name: "metrics-client-ca", + MountPath: "/etc/tls/client", + ReadOnly: true, + }, + ) + + p.Spec.Volumes = append( + p.Spec.Volumes, + v1.Volume{ + Name: "metrics-client-ca", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "metrics-client-ca", + }, + }, + }, + }) case "prom-label-proxy": p.Spec.Containers[i].Image = f.config.Images.PromLabelProxy } diff --git a/pkg/manifests/tls.go b/pkg/manifests/tls.go index c6bbef1b59..1fd768467f 100644 --- a/pkg/manifests/tls.go +++ b/pkg/manifests/tls.go @@ -54,6 +54,39 @@ func (f *Factory) GRPCSecret() (*v1.Secret, error) { return s, nil } +func (f *Factory) MetricsClientCerts() 
(*v1.Secret, error) {
+    s, err := f.NewSecret(f.assets.MustNewAssetReader(ClusterMonitoringMetricsClientCertsSecret))
+    if err != nil {
+        return nil, err
+    }
+
+    s.Namespace = f.namespace
+    s.Data = make(map[string][]byte)
+    s.Annotations = make(map[string]string)
+
+    return s, nil
+}
+
+func (f *Factory) MetricsClientCACM(apiAuthConfigmap *v1.ConfigMap) (*v1.ConfigMap, error) {
+    cm, err := f.NewConfigMap(f.assets.MustNewAssetReader(ClusterMonitoringMetricsClientCACM))
+    if err != nil {
+        return nil, err
+    }
+
+    cm.Namespace = f.namespace
+    cm.Data = make(map[string]string)
+    cm.Annotations = make(map[string]string)
+
+    clientCA, ok := apiAuthConfigmap.Data["client-ca-file"]
+    if !ok {
+        return nil, errors.New("missing client-ca-file")
+    }
+
+    cm.Data["client-ca.crt"] = clientCA
+
+    return cm, nil
+}
+
 // RotateGRPCSecret rotates key material for Thanos GRPC TLS based communication.
 //
 // If no key material is present, it creates it.
diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go
index 14e76bd796..485844b33f 100644
--- a/pkg/operator/operator.go
+++ b/pkg/operator/operator.go
@@ -16,22 +16,29 @@ package operator
 import (
     "context"
+    "crypto/x509/pkix"
     "fmt"
     "strings"
     "time"

-    configv1 "github.com/openshift/api/config/v1"
     "github.com/pkg/errors"
     "github.com/prometheus/client_golang/prometheus"
+
+    certapiv1 "k8s.io/api/certificates/v1"
     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+    "k8s.io/client-go/informers"
     "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/util/workqueue"
     "k8s.io/klog/v2"

+    configv1 "github.com/openshift/api/config/v1"
+    "github.com/openshift/library-go/pkg/operator/csr"
+    "github.com/openshift/library-go/pkg/operator/events"
+
     "github.com/openshift/cluster-monitoring-operator/pkg/client"
     "github.com/openshift/cluster-monitoring-operator/pkg/manifests"
     "github.com/openshift/cluster-monitoring-operator/pkg/tasks"
@@ -118,6 +125,7 @@ const (
     telemeterCABundleConfigMap    = "openshift-monitoring/telemeter-trusted-ca-bundle"
     alertmanagerCABundleConfigMap = "openshift-monitoring/alertmanager-trusted-ca-bundle"
     grpcTLS                       = "openshift-monitoring/grpc-tls"
+    metricsClientCerts            = "openshift-monitoring/metrics-client-certs"

     // Canonical name of the cluster-wide infrastructure resource.
     clusterResourceName = "cluster"
@@ -139,8 +147,10 @@ type Operator struct {
     client *client.Client

-    cmapInf   cache.SharedIndexInformer
-    informers []cache.SharedIndexInformer
+    cmapInf              cache.SharedIndexInformer
+    informers            []cache.SharedIndexInformer
+    informerFactories    []informers.SharedInformerFactory
+    controllersToRunFunc []func(ctx context.Context, workers int)

     queue workqueue.RateLimitingInterface
@@ -179,6 +189,8 @@ func New(
         queue:     workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(50*time.Millisecond, 3*time.Minute), "cluster-monitoring"),
         informers: make([]cache.SharedIndexInformer, 0),
         assets:    a,
+        informerFactories:    make([]informers.SharedInformerFactory, 0),
+        controllersToRunFunc: make([]func(context.Context, int), 0),
     }

     informer := cache.NewSharedIndexInformer(
@@ -246,6 +258,50 @@ func New(
     })
     o.informers = append(o.informers, informer)

+    kubeInformersOperatorNS := informers.NewSharedInformerFactoryWithOptions(
+        c.KubernetesInterface(),
+        resyncPeriod,
+        informers.WithNamespace(namespace),
+    )
+    o.informerFactories = append(o.informerFactories, kubeInformersOperatorNS)
+
+    controllerRef, err := events.GetControllerReferenceForCurrentPod(o.client.KubernetesInterface(), namespace, nil)
+    if err != nil {
+        klog.Warningf("unable to get owner reference (falling back to namespace): %v", err)
+    }
+
+    eventRecorder := events.NewKubeRecorderWithOptions(
+        o.client.KubernetesInterface().CoreV1().Events(namespace),
+        events.RecommendedClusterSingletonCorrelatorOptions(),
+        "cluster-monitoring-operator",
+        controllerRef,
+    )
+
+    csrController := csr.NewClientCertificateController(
+        csr.ClientCertOption{
+            SecretNamespace: "openshift-monitoring",
+            SecretName:      "metrics-client-certs",
+        },
+        csr.CSROption{
+            ObjectMeta: metav1.ObjectMeta{
+                GenerateName: "system:openshift:openshift-monitoring-",
+                Labels: map[string]string{
+                    "metrics.openshift.io/csr.subject": "prometheus",
+                },
+            },
+            Subject:    &pkix.Name{CommonName: "system:serviceaccount:openshift-monitoring:prometheus-k8s"},
+            SignerName: certapiv1.KubeAPIServerClientSignerName,
+        },
+        kubeInformersOperatorNS.Certificates().V1().CertificateSigningRequests(),
+        o.client.KubernetesInterface().CertificatesV1().CertificateSigningRequests(),
+        kubeInformersOperatorNS.Core().V1().Secrets(),
+        o.client.KubernetesInterface().CoreV1(),
+        eventRecorder,
+        "OpenShiftMonitoringClientCertRequester",
+    )
+
+    o.controllersToRunFunc = append(o.controllersToRunFunc, csrController.Run)
+
     return o, nil
 }
@@ -268,7 +324,8 @@ func (o *Operator) RegisterMetrics(r prometheus.Registerer) {
 }

 // Run the controller.
-func (o *Operator) Run(stopc <-chan struct{}) error {
+func (o *Operator) Run(ctx context.Context) error {
+    stopc := ctx.Done()
     defer o.queue.ShutDown()

     errChan := make(chan error)
@@ -298,14 +355,24 @@ func (o *Operator) Run(stopc <-chan struct{}) error {
         go inf.Run(stopc)
         synced = append(synced, inf.HasSynced)
     }
+    for _, f := range o.informerFactories {
+        f.Start(stopc)
+    }

     klog.V(4).Info("Waiting for initial cache sync.")
     ok := cache.WaitForCacheSync(stopc, synced...)
     if !ok {
         return errors.New("failed to sync informers")
     }
+    for _, f := range o.informerFactories {
+        f.WaitForCacheSync(stopc)
+    }

     klog.V(4).Info("Initial cache sync done.")

+    for _, r := range o.controllersToRunFunc {
+        go r(ctx, 1)
+    }
+
     go o.worker()

     ticker := time.NewTicker(5 * time.Minute)
@@ -368,6 +435,7 @@ func (o *Operator) handleEvent(obj interface{}) {
     case telemeterCABundleConfigMap:
     case alertmanagerCABundleConfigMap:
     case grpcTLS:
+    case metricsClientCerts:
     case uwmConfigMap:
     default:
         klog.V(5).Infof("ConfigMap or Secret (%s) not triggering an update.", key)
@@ -448,6 +516,7 @@ func (o *Operator) sync(key string) error {
         // update prometheus-operator before anything else because it is responsible for managing many other resources (e.g. Prometheus, Alertmanager, Thanos Ruler, ...).
         tasks.NewTaskGroup(
             []*tasks.TaskSpec{
+                tasks.NewTaskSpec("Updating metrics scraping client CA", tasks.NewMetricsClientCATask(o.client, factory)),
                 tasks.NewTaskSpec("Updating Prometheus Operator", tasks.NewPrometheusOperatorTask(o.client, factory)),
             }),
         tasks.NewTaskGroup(
diff --git a/pkg/tasks/metrics_client_ca.go b/pkg/tasks/metrics_client_ca.go
new file mode 100644
index 0000000000..1409db7354
--- /dev/null
+++ b/pkg/tasks/metrics_client_ca.go
@@ -0,0 +1,42 @@
+package tasks
+
+import (
+    "github.com/openshift/cluster-monitoring-operator/pkg/client"
+    "github.com/openshift/cluster-monitoring-operator/pkg/manifests"
+    "github.com/pkg/errors"
+)
+
+type MetricsClientCATask struct {
+    client  *client.Client
+    factory *manifests.Factory
+}
+
+// NewMetricsClientCATask returns an instance of MetricsClientCATask which creates
+// and updates the client-CA ConfigMap that is required by our deployments of the
+// kube-rbac-proxy in order to be able to authenticate client-cert authenticated
+// metrics requests.
+func NewMetricsClientCATask(client *client.Client, factory *manifests.Factory) *MetricsClientCATask {
+    return &MetricsClientCATask{
+        client:  client,
+        factory: factory,
+    }
+}
+
+func (t *MetricsClientCATask) Run() error {
+    apiAuthConfigmap, err := t.client.GetConfigmap("kube-system", "extension-apiserver-authentication")
+    if err != nil {
+        return errors.Wrap(err, "failed to load kube-system/extension-apiserver-authentication configmap")
+    }
+
+    cm, err := t.factory.MetricsClientCACM(apiAuthConfigmap)
+    if err != nil {
+        return errors.Wrap(err, "initializing Metrics Client CA failed")
+    }
+
+    err = t.client.CreateOrUpdateConfigMap(cm)
+    if err != nil {
+        return errors.Wrap(err, "reconciling Metrics Client CA ConfigMap failed")
+    }
+
+    return nil
+}
diff --git a/pkg/tasks/prometheus.go b/pkg/tasks/prometheus.go
index c46df33989..f31da875a6 100644
--- a/pkg/tasks/prometheus.go
+++ b/pkg/tasks/prometheus.go
@@ -251,6 +251,17 @@ func (t *PrometheusTask) Run() error {
         return errors.Wrap(err, "reconciling Thanos sidecar Service failed")
     }

+    // There is no need to hash metrics client certs as Prometheus does that in-process.
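The flow wired up above: the operator ships an empty metrics-client-certs Secret, the library-go CSR controller requests a kube-apiserver-client certificate for the prometheus-k8s service account and stores the issued key pair in that Secret, and in the lines that follow PrometheusTask waits for the Secret to be populated before mounting it. WaitForSecret's real implementation lives in pkg/client and is not part of this diff, so the polling shape below is a hedged sketch of the semantics it needs, with a hypothetical function name:

```go
package tasks

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPopulatedSecret is a hypothetical stand-in for client.WaitForSecret:
// the metrics-client-certs Secret starts out empty and only gains tls.crt and
// tls.key once the CSR has been approved and signed, so consumers must poll
// until the data shows up.
func waitForPopulatedSecret(c kubernetes.Interface, ns, name string) (*v1.Secret, error) {
	var s *v1.Secret
	err := wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
		var getErr error
		s, getErr = c.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(getErr) {
			return false, nil // Secret not created yet; keep polling.
		}
		if getErr != nil {
			return false, getErr
		}
		// The issued client certificate lands under the conventional TLS keys.
		return len(s.Data["tls.crt"]) > 0 && len(s.Data["tls.key"]) > 0, nil
	})
	return s, err
}
```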
+ metricsCerts, err := t.factory.MetricsClientCerts() + if err != nil { + return errors.Wrap(err, "initializing Metrics Client Certs secret failed") + } + + metricsCerts, err = t.client.WaitForSecret(metricsCerts) + if err != nil { + return errors.Wrap(err, "waiting for Metrics Client Certs secret failed") + } + grpcTLS, err := t.factory.GRPCSecret() if err != nil { return errors.Wrap(err, "initializing Prometheus GRPC secret failed") diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml new file mode 100644 index 0000000000..102fb9a691 --- /dev/null +++ b/vendor/github.com/blang/semver/.travis.yml @@ -0,0 +1,21 @@ +language: go +matrix: + include: + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip +install: +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci + -repotoken $COVERALLS_TOKEN +- echo "Build examples" ; cd examples && go build +- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) +env: + global: + secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE new file mode 100644 index 0000000000..5ba5c86fcb --- /dev/null +++ b/vendor/github.com/blang/semver/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md new file mode 100644 index 0000000000..08b2e4a3d7 --- /dev/null +++ b/vendor/github.com/blang/semver/README.md @@ -0,0 +1,194 @@ +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) +====== + +semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. + +Usage +----- +```bash +$ go get github.com/blang/semver +``` +Note: Always vendor your dependencies or fix on a specific version tag. 
+ +```go +import github.com/blang/semver +v1, err := semver.Make("1.0.0-beta") +v2, err := semver.Make("2.0.0-beta") +v1.Compare(v2) +``` + +Also check the [GoDocs](http://godoc.org/github.com/blang/semver). + +Why should I use this lib? +----- + +- Fully spec compatible +- No reflection +- No regex +- Fully tested (Coverage >99%) +- Readable parsing/validation errors +- Fast (See [Benchmarks](#benchmarks)) +- Only Stdlib +- Uses values instead of pointers +- Many features, see below + + +Features +----- + +- Parsing and validation at all levels +- Comparator-like comparisons +- Compare Helper Methods +- InPlace manipulation +- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` +- Wildcards `>=1.x`, `<=2.5.x` +- Sortable (implements sort.Interface) +- database/sql compatible (sql.Scanner/Valuer) +- encoding/json compatible (json.Marshaler/Unmarshaler) + +Ranges +------ + +A `Range` is a set of conditions which specify which versions satisfy the range. + +A condition is composed of an operator and a version. The supported operators are: + +- `<1.0.0` Less than `1.0.0` +- `<=1.0.0` Less than or equal to `1.0.0` +- `>1.0.0` Greater than `1.0.0` +- `>=1.0.0` Greater than or equal to `1.0.0` +- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` +- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. + +Note that spaces between the operator and the version will be gracefully tolerated. + +A `Range` can link multiple `Ranges` separated by space: + +Ranges can be linked by logical AND: + + - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` + - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` + +Ranges can also be linked by logical OR: + + - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` + +AND has a higher precedence than OR. It's not possible to use brackets. + +Ranges can be combined by both AND and OR + + - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` + +Range usage: + +``` +v, err := semver.Parse("1.2.3") +range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") +if range(v) { + //valid +} + +``` + +Example +----- + +Have a look at full examples in [examples/main.go](examples/main.go) + +```go +import github.com/blang/semver + +v, err := semver.Make("0.0.1-alpha.preview+123.github") +fmt.Printf("Major: %d\n", v.Major) +fmt.Printf("Minor: %d\n", v.Minor) +fmt.Printf("Patch: %d\n", v.Patch) +fmt.Printf("Pre: %s\n", v.Pre) +fmt.Printf("Build: %s\n", v.Build) + +// Prerelease versions array +if len(v.Pre) > 0 { + fmt.Println("Prerelease versions:") + for i, pre := range v.Pre { + fmt.Printf("%d: %q\n", i, pre) + } +} + +// Build meta data array +if len(v.Build) > 0 { + fmt.Println("Build meta data:") + for i, build := range v.Build { + fmt.Printf("%d: %q\n", i, build) + } +} + +v001, err := semver.Make("0.0.1") +// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE +v001.GT(v) == true +v.LT(v001) == true +v.GTE(v) == true +v.LTE(v) == true + +// Or use v.Compare(v2) for comparisons (-1, 0, 1): +v001.Compare(v) == 1 +v.Compare(v001) == -1 +v.Compare(v) == 0 + +// Manipulate Version in place: +v.Pre[0], err = semver.NewPRVersion("beta") +if err != nil { + fmt.Printf("Error parsing pre release version: %q", err) +} + +fmt.Println("\nValidate versions:") +v.Build[0] = "?" 
+
+err = v.Validate()
+if err != nil {
+    fmt.Printf("Validation failed: %s\n", err)
+}
+```
+
+
+Benchmarks
+-----
+
+    BenchmarkParseSimple-4           5000000     390 ns/op     48 B/op   1 allocs/op
+    BenchmarkParseComplex-4          1000000    1813 ns/op    256 B/op   7 allocs/op
+    BenchmarkParseAverage-4          1000000    1171 ns/op    163 B/op   4 allocs/op
+    BenchmarkStringSimple-4         20000000     119 ns/op     16 B/op   1 allocs/op
+    BenchmarkStringLarger-4         10000000     206 ns/op     32 B/op   2 allocs/op
+    BenchmarkStringComplex-4         5000000     324 ns/op     80 B/op   3 allocs/op
+    BenchmarkStringAverage-4         5000000     273 ns/op     53 B/op   2 allocs/op
+    BenchmarkValidateSimple-4      200000000    9.33 ns/op      0 B/op   0 allocs/op
+    BenchmarkValidateComplex-4       3000000     469 ns/op      0 B/op   0 allocs/op
+    BenchmarkValidateAverage-4       5000000     256 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareSimple-4       100000000    11.8 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareComplex-4       50000000    30.8 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareAverage-4       30000000    41.5 ns/op      0 B/op   0 allocs/op
+    BenchmarkSort-4                  3000000     419 ns/op    256 B/op   2 allocs/op
+    BenchmarkRangeParseSimple-4      2000000     850 ns/op    192 B/op   5 allocs/op
+    BenchmarkRangeParseAverage-4     1000000    1677 ns/op    400 B/op  10 allocs/op
+    BenchmarkRangeParseComplex-4      300000    5214 ns/op   1440 B/op  30 allocs/op
+    BenchmarkRangeMatchSimple-4     50000000    25.6 ns/op      0 B/op   0 allocs/op
+    BenchmarkRangeMatchAverage-4    30000000    56.4 ns/op      0 B/op   0 allocs/op
+    BenchmarkRangeMatchComplex-4    10000000     153 ns/op      0 B/op   0 allocs/op
+
+See benchmark cases at [semver_test.go](semver_test.go)
+
+
+Motivation
+-----
+
+I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
+
+
+Contribution
+-----
+
+Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
+
+
+License
+-----
+
+See the [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go
new file mode 100644
index 0000000000..a74bf7c449
--- /dev/null
+++ b/vendor/github.com/blang/semver/json.go
@@ -0,0 +1,23 @@
+package semver
+
+import (
+	"encoding/json"
+)
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
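+// The input is expected to be a JSON string such as "1.2.3-beta"; it is
+// parsed with Parse, so invalid versions produce an error.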
+func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json new file mode 100644 index 0000000000..1cf8ebdd9c --- /dev/null +++ b/vendor/github.com/blang/semver/package.json @@ -0,0 +1,17 @@ +{ + "author": "blang", + "bugs": { + "URL": "https://github.com/blang/semver/issues", + "url": "https://github.com/blang/semver/issues" + }, + "gx": { + "dvcsimport": "github.com/blang/semver" + }, + "gxVersion": "0.10.0", + "language": "go", + "license": "MIT", + "name": "semver", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "3.5.1" +} + diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go new file mode 100644 index 0000000000..fca406d479 --- /dev/null +++ b/vendor/github.com/blang/semver/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. 
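+// The returned Range is an ordinary function value, so it can be composed
+// further with the AND and OR helpers defined above.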
+//
+// Valid ranges are:
+//   - "<1.0.0"
+//   - "<=1.0.0"
+//   - ">1.0.0"
+//   - ">=1.0.0"
+//   - "1.0.0", "=1.0.0", "==1.0.0"
+//   - "!1.0.0", "!=1.0.0"
+//
+// A Range can consist of multiple conditions separated by space:
+// Conditions can be linked by logical AND:
+//   - ">1.0.0 <2.0.0" matches versions between both bounds, e.g. "1.1.1" and "1.8.7", but not "1.0.0" or "2.0.0"
+//   - ">1.0.0 <3.0.0 !2.0.3-beta.2" matches every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
+//
+// Conditions can also be linked by logical OR:
+//   - "<2.0.0 || >=3.0.0" matches "1.x.x" and "3.x.x" but not "2.x.x"
+//
+// AND has a higher precedence than OR. It's not possible to use brackets.
+//
+// AND and OR can be combined:
+//
+//  - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` matches `1.2.3`, `1.9.9` and `3.1.1`, but not `4.2.1` or `2.1.1`
+func ParseRange(s string) (Range, error) {
+	parts := splitAndTrim(s)
+	orParts, err := splitORParts(parts)
+	if err != nil {
+		return nil, err
+	}
+	expandedParts, err := expandWildcardVersion(orParts)
+	if err != nil {
+		return nil, err
+	}
+	var orFn Range
+	for _, p := range expandedParts {
+		var andFn Range
+		for _, ap := range p {
+			opStr, vStr, err := splitComparatorVersion(ap)
+			if err != nil {
+				return nil, err
+			}
+			vr, err := buildVersionRange(opStr, vStr)
+			if err != nil {
+				return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
+			}
+			rf := vr.rangeFunc()
+
+			// Set function
+			if andFn == nil {
+				andFn = rf
+			} else { // Combine with existing function
+				andFn = andFn.AND(rf)
+			}
+		}
+		if orFn == nil {
+			orFn = andFn
+		} else {
+			orFn = orFn.OR(andFn)
+		}
+
+	}
+	return orFn, nil
+}
+
+// splitORParts splits the already cleaned parts by '||'.
+// Checks for invalid positions of the operator and returns an
+// error if found.
+func splitORParts(parts []string) ([][]string, error) {
+	var ORparts [][]string
+	last := 0
+	for i, p := range parts {
+		if p == "||" {
+			if i == 0 {
+				return nil, fmt.Errorf("First element in range is '||'")
+			}
+			ORparts = append(ORparts, parts[last:i])
+			last = i + 1
+		}
+	}
+	if last == len(parts) {
+		return nil, fmt.Errorf("Last element in range is '||'")
+	}
+	ORparts = append(ORparts, parts[last:])
+	return ORparts, nil
+}
+
+// buildVersionRange takes an operator string and a version string
+// and builds a versionRange, or returns an error.
+func buildVersionRange(opStr, vStr string) (*versionRange, error) {
+	c := parseComparator(opStr)
+	if c == nil {
+		return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
+	}
+	v, err := Parse(vStr)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
+	}
+
+	return &versionRange{
+		v: v,
+		c: c,
+	}, nil
+
+}
+
+// inArray checks if a byte is contained in an array of bytes
+func inArray(s byte, list []byte) bool {
+	for _, el := range list {
+		if el == s {
+			return true
+		}
+	}
+	return false
+}
+
+// splitAndTrim splits a range string by spaces and cleans whitespaces
+func splitAndTrim(s string) (result []string) {
+	last := 0
+	var lastChar byte
+	excludeFromSplit := []byte{'>', '<', '='}
+	for i := 0; i < len(s); i++ {
+		if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
+			if last < i-1 {
+				result = append(result, s[last:i])
+			}
+			last = i + 1
+		} else if s[i] != ' ' {
+			lastChar = s[i]
+		}
+	}
+	if last < len(s)-1 {
+		result = append(result, s[last:])
+	}
+
+	for i, v := range result {
+		result[i] = strings.Replace(v, " ", "", -1)
+	}
+
+	// parts := strings.Split(s, " ")
+	// for _, x := range parts {
+	// 	if s := strings.TrimSpace(x); len(s) != 0 {
+	// 		result = append(result, s)
+	// 	}
+	// }
+	return
+}
+
+// splitComparatorVersion splits the comparator from the version.
+// Input must be free of leading or trailing spaces.
+func splitComparatorVersion(s string) (string, string, error) {
+	i := strings.IndexFunc(s, unicode.IsDigit)
+	if i == -1 {
+		return "", "", fmt.Errorf("Could not get version from string: %q", s)
+	}
+	return strings.TrimSpace(s[0:i]), s[i:], nil
+}
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+	parts := strings.Split(vStr, ".")
+	nparts := len(parts)
+	wildcard := parts[nparts-1]
+
+	possibleWildcardType := wildcardTypefromInt(nparts)
+	if wildcard == "x" {
+		return possibleWildcardType
+	}
+
+	return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+	// handle 1.x.x
+	vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+	vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+	parts := strings.Split(vStr2, ".")
+
+	// handle 1.x
+	if len(parts) == 2 {
+		return vStr2 + ".0"
+	}
+
+	return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return "", err
+	}
+	parts[0] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return "", err
+	}
+	parts[1] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x will become >= 1.2.0
+// <= 1.2.x will become < 1.3.0
+// > 1.2.x will become >= 1.3.0
+// < 1.2.x will become < 1.2.0
+// != 1.2.x will become < 1.2.0 >= 1.3.0
+//
+// * when dealing with minor wildcards:
+// >= 1.x will become >= 1.0.0
+// <= 1.x will become < 2.0.0
+// > 1.x will become >= 2.0.0
+// < 1.x will become < 1.0.0
+// != 1.x will become < 1.0.0 >= 2.0.0
+//
+// * when dealing with wildcards without
+// version operator:
+// 1.2.x will become >= 1.2.0 < 1.3.0
+// 1.x will become >= 1.0.0 < 2.0.0
+func expandWildcardVersion(parts [][]string) ([][]string, error) {
+	var expandedParts [][]string
+	for _, p := range parts {
+		var newParts []string
+		for _, ap := range p {
+			if strings.Index(ap, "x") != -1 {
+				opStr, vStr, err := splitComparatorVersion(ap)
+				if err != nil {
+					return nil, err
+				}
+
+				versionWildcardType := getWildcardType(vStr)
+				flatVersion := createVersionFromWildcard(vStr)
+
+				var resultOperator string
+				var shouldIncrementVersion bool
+				switch opStr {
+				case ">":
+					resultOperator = ">="
+					shouldIncrementVersion = true
+				case ">=":
+					resultOperator = ">="
+				case "<":
+					resultOperator = "<"
+				case "<=":
+					resultOperator = "<"
+					shouldIncrementVersion = true
+				case "", "=", "==":
+					newParts = append(newParts, ">="+flatVersion)
+					resultOperator = "<"
+					shouldIncrementVersion = true
+				case "!=", "!":
+					newParts = append(newParts, "<"+flatVersion)
+					resultOperator = ">="
+					shouldIncrementVersion = true
+				}
+
+				var resultVersion string
+				if shouldIncrementVersion {
+					switch versionWildcardType {
+					case patchWildcard:
+						resultVersion, _ = incrementMinorVersion(flatVersion)
+					case minorWildcard:
+						resultVersion, _ = incrementMajorVersion(flatVersion)
+					}
+				} else {
+					resultVersion = flatVersion
+				}
+
+				ap = resultOperator + resultVersion
+			}
+			newParts = append(newParts, ap)
+		}
+		expandedParts = append(expandedParts, newParts)
+	}
+
+	return expandedParts, nil
+}
+
+func parseComparator(s string) comparator {
+	switch s {
+	case "==":
+		fallthrough
+	case "":
+		fallthrough
+	case "=":
+		return compEQ
+	case ">":
+		return compGT
+	case ">=":
+		return compGE
+	case "<":
+		return compLT
+	case "<=":
+		return compLE
+	case "!":
+		fallthrough
+	case "!=":
+		return compNE
+	}
+
+	return nil
+}
+
+// MustParseRange is like ParseRange but panics if the range cannot be parsed.
+func MustParseRange(s string) Range {
+	r, err := ParseRange(s)
+	if err != nil {
+		panic(`semver: ParseRange(` + s + `): ` + err.Error())
+	}
+	return r
+}
diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go
new file mode 100644
index 0000000000..8ee0842e6a
--- /dev/null
+++ b/vendor/github.com/blang/semver/semver.go
@@ -0,0 +1,418 @@
+package semver
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const (
+	numbers  string = "0123456789"
+	alphas          = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
+	alphanum        = alphas + numbers
+)
+
+// SpecVersion is the latest fully supported spec version of semver
+var SpecVersion = Version{
+	Major: 2,
+	Minor: 0,
+	Patch: 0,
+}
+
+// Version represents a semver compatible version
+type Version struct {
+	Major uint64
+	Minor uint64
+	Patch uint64
+	Pre   []PRVersion
+	Build []string // no precedence
+}
+
+// String returns the string representation of the version.
+func (v Version) String() string {
+	b := make([]byte, 0, 5)
+	b = strconv.AppendUint(b, v.Major, 10)
+	b = append(b, '.')
+	b = strconv.AppendUint(b, v.Minor, 10)
+	b = append(b, '.')
+	b = strconv.AppendUint(b, v.Patch, 10)
+
+	if len(v.Pre) > 0 {
+		b = append(b, '-')
+		b = append(b, v.Pre[0].String()...)
+
+		for _, pre := range v.Pre[1:] {
+			b = append(b, '.')
+			b = append(b, pre.String()...)
+		}
+	}
+
+	if len(v.Build) > 0 {
+		b = append(b, '+')
+		b = append(b, v.Build[0]...)
+
+		for _, build := range v.Build[1:] {
+			b = append(b, '.')
+			b = append(b, build...)
+		}
+	}
+
+	return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+	return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+	return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+	return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+	if v.Major != o.Major {
+		if v.Major > o.Major {
+			return 1
+		}
+		return -1
+	}
+	if v.Minor != o.Minor {
+		if v.Minor > o.Minor {
+			return 1
+		}
+		return -1
+	}
+	if v.Patch != o.Patch {
+		if v.Patch > o.Patch {
+			return 1
+		}
+		return -1
+	}
+
+	// Quick comparison if a version has no prerelease versions
+	if len(v.Pre) == 0 && len(o.Pre) == 0 {
+		return 0
+	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+		return 1
+	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+		return -1
+	}
+
+	i := 0
+	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+			continue
+		} else if comp == 1 {
+			return 1
+		} else {
+			return -1
+		}
+	}
+
+	// If all compared pre-release identifiers are equal, the version with
+	// additional identifiers is the greater one.
+	if i == len(v.Pre) && i == len(o.Pre) {
+		return 0
+	} else if i == len(v.Pre) && i < len(o.Pre) {
+		return -1
+	} else {
+		return 1
+	}
+
+}
+
+// Validate validates v and returns an error if any part of the version is invalid.
+func (v Version) Validate() error {
+	// Major, Minor, Patch already validated using uint64
+
+	for _, pre := range v.Pre {
+		if !pre.IsNum { // numeric prerelease versions are already validated as uint64
+			if len(pre.VersionStr) == 0 {
+				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+			}
+			if !containsOnly(pre.VersionStr, alphanum) {
+				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+			}
+		}
+	}
+
+	for _, build := range v.Build {
+		if len(build) == 0 {
+			return fmt.Errorf("Build meta data can not be empty %q", build)
+		}
+		if !containsOnly(build, alphanum) {
+			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+		}
+	}
+
+	return nil
+}
+
+// New is an alias for Parse that returns a pointer: it parses a version string and returns a pointer to a validated Version, or an error.
+func New(s string) (vp *Version, err error) {
+	v, err := Parse(s)
+	vp = &v
+	return
+}
+
+// Make is an alias for Parse: it parses a version string and returns a validated Version or an error.
+func Make(s string) (Version, error) {
+	return Parse(s)
+}
+
+// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
+// specs to be parsed by this library. It does so by normalizing versions before passing them to
+// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
+// with only major and minor components specified.
+func ParseTolerant(s string) (Version, error) {
+	s = strings.TrimSpace(s)
+	s = strings.TrimPrefix(s, "v")
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	if len(parts) < 3 {
+		if strings.ContainsAny(parts[len(parts)-1], "+-") {
+			return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
+		}
+		for len(parts) < 3 {
+			parts = append(parts, "0")
+		}
+		s = strings.Join(parts, ".")
+	}
+
+	return Parse(s)
+}
+
+// Parse parses a version string and returns a validated Version or an error
+func Parse(s string) (Version, error) {
+	if len(s) == 0 {
+		return Version{}, errors.New("Version string empty")
+	}
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	if len(parts) != 3 {
+		return Version{}, errors.New("No Major.Minor.Patch elements found")
+	}
+
+	// Major
+	if !containsOnly(parts[0], numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
+	}
+	if hasLeadingZeroes(parts[0]) {
+		return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
+	}
+	major, err := strconv.ParseUint(parts[0], 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	// Minor
+	if !containsOnly(parts[1], numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
+	}
+	if hasLeadingZeroes(parts[1]) {
+		return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
+	}
+	minor, err := strconv.ParseUint(parts[1], 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	v := Version{}
+	v.Major = major
+	v.Minor = minor
+
+	var build, prerelease []string
+	patchStr := parts[2]
+
+	if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
+		build = strings.Split(patchStr[buildIndex+1:], ".")
+		patchStr = patchStr[:buildIndex]
+	}
+
+	if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
+		prerelease = strings.Split(patchStr[preIndex+1:], ".")
+		patchStr = patchStr[:preIndex]
+	}
+
+	if !containsOnly(patchStr, numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
+	}
+	if hasLeadingZeroes(patchStr) {
+		return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
+	}
+	patch, err := strconv.ParseUint(patchStr, 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	v.Patch = patch
+
+	// Prerelease
+	for _, prstr := range prerelease {
+		parsedPR, err := NewPRVersion(prstr)
+		if err != nil {
+			return Version{}, err
+		}
+		v.Pre = append(v.Pre, parsedPR)
+	}
+
+	// Build meta data
+	for _, str := range build {
+		if len(str) == 0 {
+			return Version{}, errors.New("Build meta data is empty")
+		}
+		if !containsOnly(str, alphanum) {
+			return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
+		}
+		v.Build = append(v.Build, str)
+	}
+
+	return v, nil
+}
+
+// MustParse is like Parse but panics if the version cannot be parsed.
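+// It is intended for hard-coded version literals that are known to be valid,
+// e.g. var minVersion = semver.MustParse("1.2.3").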
+func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go new file mode 100644 index 0000000000..e18f880826 --- /dev/null +++ b/vendor/github.com/blang/semver/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
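+// It implements sort.Interface, so a slice of versions can be sorted with
+// sort.Sort; see the Sort helper below.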
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go new file mode 100644 index 0000000000..eb4d802666 --- /dev/null +++ b/vendor/github.com/blang/semver/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. 
For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 0000000000..eac1c7664f --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. 
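+// Callers that need concurrent access must provide their own synchronization,
+// for example by guarding every method call with a sync.Mutex.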
+type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml index 2bf2712830..7e5d27027c 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml @@ -1,169 +1,137 @@ +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition -apiVersion: apiextensions.k8s.io/v1beta1 metadata: - name: clusteroperators.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/497 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: clusteroperators.config.openshift.io spec: - additionalPrinterColumns: - - JSONPath: .status.versions[?(@.name=="operator")].version - description: The version the operator is at. 
- name: Version - type: string - - JSONPath: .status.conditions[?(@.type=="Available")].status - description: Whether the operator is running and stable. - name: Available - type: string - - JSONPath: .status.conditions[?(@.type=="Progressing")].status - description: Whether the operator is processing changes. - name: Progressing - type: string - - JSONPath: .status.conditions[?(@.type=="Degraded")].status - description: Whether the operator is degraded. - name: Degraded - type: string - - JSONPath: .status.conditions[?(@.type=="Available")].lastTransitionTime - description: The time the operator's Available status last changed. - name: Since - type: date group: config.openshift.io names: kind: ClusterOperator listKind: ClusterOperatorList plural: clusteroperators - singular: clusteroperator shortNames: - - co - preserveUnknownFields: false + - co + singular: clusteroperator scope: Cluster - subresources: - status: {} - version: v1 versions: - - name: v1 - served: true - storage: true - validation: - openAPIV3Schema: - description: ClusterOperator is the Custom Resource object which holds the current - state of an operator. This object is used by operators to convey their state - to the rest of the cluster. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + - additionalPrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available type: string - metadata: - type: object - spec: - description: spec holds configuration that could apply to any operator. - type: object - status: - description: status holds the information about the state of an operator. It - is consistent with status information across the Kubernetes ecosystem. + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster. type: object + required: + - spec properties: - conditions: - description: conditions describes the state of the operator's managed - and monitored components. - type: array - items: - description: ClusterOperatorStatusCondition represents the state of - the operator's managed and monitored components. 
- type: object - required: - - lastTransitionTime - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - type: string - format: date-time - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be rendered - as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - extension: - description: extension contains any additional status information specific - to the operator which owns this status object. + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - relatedObjects: - description: 'relatedObjects is a list of objects that are "interesting" - or related to this operator. Common uses are: 1. the detailed resource - driving the operator 2. operator namespaces 3. operand namespaces' - type: array - items: - description: ObjectReference contains enough information to let you - inspect or modify the referred object. - type: object - required: - - group - - name - - resource - properties: - group: - description: group of the referent. - type: string - name: - description: name of the referent. - type: string - namespace: - description: namespace of the referent. - type: string - resource: - description: resource of the referent. - type: string - versions: - description: versions is a slice of operator and operand version tuples. Operators - which manage multiple operands will have multiple operand entries - in the array. Available operators must report the version of the - operator itself with the name "operator". An operator reports a new - "operator" version when it has rolled out the new version to all of - its operands. - type: array - items: - type: object - required: - - name - - version - properties: - name: - description: name is the name of the particular operand this version - is for. It usually matches container images, not operators. - type: string - version: - description: version indicates which version of a particular operand - is currently being managed. It must always match the Available - operand. If 1.0.0 is Available, then this must indicate 1.0.0 - even if the operator is trying to rollout 1.1.0 - type: string - versions: - - name: v1 - served: true - storage: true + spec: + description: spec holds configuration that could apply to any operator. + type: object + status: + description: status holds the information about the state of an operator. 
It is consistent with status information across the Kubernetes ecosystem. + type: object + properties: + conditions: + description: conditions describes the state of the operator's managed and monitored components. + type: array + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + extension: + description: extension contains any additional status information specific to the operator which owns this status object. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + relatedObjects: + description: 'relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces' + type: array + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + type: object + required: + - group + - name + - resource + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + versions: + description: versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name "operator". An operator reports a new "operator" version when it has rolled out the new version to all of its operands. + type: array + items: + type: object + required: + - name + - version + properties: + name: + description: name is the name of the particular operand this version is for. It usually matches container images, not operators. + type: string + version: + description: version indicates which version of a particular operand is currently being managed. It must always match the Available operand. 
If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0 + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml index 628538d0e5..25bd91d1e4 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -1,335 +1,225 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: clusterversions.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: clusterversions.config.openshift.io spec: group: config.openshift.io - versions: - - name: v1 - served: true - storage: true - scope: Cluster - subresources: - status: {} names: + kind: ClusterVersion plural: clusterversions singular: clusterversion - kind: ClusterVersion - preserveUnknownFields: false - additionalPrinterColumns: - - name: Version - type: string - JSONPath: .status.history[?(@.state=="Completed")].version - - name: Available - type: string - JSONPath: .status.conditions[?(@.type=="Available")].status - - name: Progressing - type: string - JSONPath: .status.conditions[?(@.type=="Progressing")].status - - name: Since - type: date - JSONPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - - name: Status - type: string - JSONPath: .status.conditions[?(@.type=="Progressing")].message - validation: - openAPIV3Schema: - description: ClusterVersion is the configuration for the ClusterVersionOperator. - This is where parameters related to automatic updates can be set. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available type: string - metadata: - type: object - spec: - description: spec is the desired state of the cluster version - the operator - will work to ensure that the desired version is applied to the cluster. 
+ - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set. type: object required: - - clusterID + - spec properties: - channel: - description: channel is an identifier for explicitly requesting that - a non-default set of updates be applied to this cluster. The default - channel will be contain stable updates that are appropriate for production - clusters. + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string - clusterID: - description: clusterID uniquely identifies this cluster. This is expected - to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - in hexadecimal values). This is a required field. + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string - desiredUpdate: - description: "desiredUpdate is an optional field that indicates the - desired value of the cluster version. Setting this value will trigger - an upgrade (if the current version does not match the desired version). - The set of recommended update values is listed as part of available - updates in status, and setting values outside that range may cause - the upgrade to fail. You may specify the version field without setting - image if an update exists with that version in the availableUpdates - or history. \n If an upgrade fails the operator will halt and report - status about the failing component. Setting the desired update value - back to the previous version will cause a rollback to be attempted. - Not all rollbacks will succeed." + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster. type: object + required: + - clusterID properties: - force: - description: "force allows an administrator to update to an image - that has failed verification, does not appear in the availableUpdates - list, or otherwise would be blocked by normal protections on update. - This option should only be used when the authenticity of the provided - image has been verified out of band because the provided image - will run with full administrative access to the cluster. Do not - use this flag with images that comes from unknown or potentially - malicious sources. \n This flag does not override other forms - of consistency checking that are required before a new update - is deployed." - type: boolean - image: - description: image is a container image location that contains the - update. When this field is part of spec, image is optional if - version is specified and the availableUpdates field contains a - matching version. 
+ channel: + description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters. type: string - version: - description: version is a semantic versioning identifying the update - version. When this field is part of spec, version is optional - if image is specified. + clusterID: + description: clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field. type: string - overrides: - description: overrides is list of overides for components that are managed - by cluster version operator. Marking a component unmanaged will prevent - the operator from creating or updating the object. - type: array - items: - description: ComponentOverride allows overriding cluster version operator's - behavior for a component. - type: object - required: - - group - - kind - - name - - namespace - - unmanaged - properties: - group: - description: group identifies the API group that the kind is in. - type: string - kind: - description: kind indentifies which object to override. - type: string - name: - description: name is the component's name. - type: string - namespace: - description: namespace is the component's namespace. If the resource - is cluster scoped, the namespace should be empty. - type: string - unmanaged: - description: 'unmanaged controls if cluster version operator should - stop managing the resources in this cluster. Default: false' - type: boolean - upstream: - description: upstream may be used to specify the preferred update server. - By default it will use the appropriate update server for the cluster - and region. - type: string - status: - description: status contains information about the available updates and - any in-progress updates. - type: object - required: - - availableUpdates - - desired - - observedGeneration - - versionHash - properties: - availableUpdates: - description: availableUpdates contains the list of updates that are - appropriate for this cluster. This list may be empty if no updates - are recommended, if the update service is unavailable, or if an invalid - channel has been specified. - type: array - items: - description: Release represents an OpenShift release image and associated - metadata. - type: object - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - type: array - items: + desiredUpdate: + description: "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history. \n If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed." 
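For reference, a desiredUpdate that triggers an upgrade per the schema above would look roughly like this sketch (the clusterID, channel, and version values are placeholders):

apiVersion: config.openshift.io/v1
kind: ClusterVersion
metadata:
  name: version                # the canonical ClusterVersion object
spec:
  clusterID: 00000000-0000-0000-0000-000000000000   # placeholder RFC4122 UUID
  channel: stable-4.7                               # placeholder channel name
  desiredUpdate:
    version: 4.7.1             # placeholder; should appear in availableUpdates or history
    # force: true              # only for images verified out of band (see description above)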
+ type: object + properties: + force: + description: "force allows an administrator to update to an image that has failed verification, does not appear in the availableUpdates list, or otherwise would be blocked by normal protections on update. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources. \n This flag does not override other forms of consistency checking that are required before a new update is deployed." + type: boolean + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. type: string - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or the - metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set for - test or nightly releases. - type: string - version: - description: version is a semantic versioning identifying the - update version. When this field is part of spec, version is - optional if image is specified. - type: string - nullable: true - conditions: - description: conditions provides information about the cluster version. - The condition "Available" is set to true if the desiredUpdate has - been reached. The condition "Progressing" is set to true if an update - is being applied. The condition "Degraded" is set to true if an update - is currently blocked by a temporary or permanent error. Conditions - are only valid for the current desiredUpdate when metadata.generation - is equal to status.generation. - type: array - items: - description: ClusterOperatorStatusCondition represents the state of - the operator's managed and monitored components. - type: object - required: - - lastTransitionTime - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - type: string - format: date-time - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be rendered - as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - desired: - description: desired is the version that the cluster is reconciling - towards. If the cluster is not yet fully initialized desired will - be set with the information available, which may be an image or a - tag. + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. 
+ type: string + overrides: + description: overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object. + type: array + items: + description: ComponentOverride allows overriding cluster version operator's behavior for a component. + type: object + required: + - group + - kind + - name + - namespace + - unmanaged + properties: + group: + description: group identifies the API group that the kind is in. + type: string + kind: + description: kind indentifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty. + type: string + unmanaged: + description: 'unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false' + type: boolean + upstream: + description: upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region. + type: string + status: + description: status contains information about the available updates and any in-progress updates. type: object + required: + - availableUpdates + - desired + - observedGeneration + - versionHash properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. + availableUpdates: + description: availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. type: array items: - type: string - image: - description: image is a container image location that contains the - update. When this field is part of spec, image is optional if - version is specified and the availableUpdates field contains a - matching version. - type: string - url: - description: url contains information about this release. This URL - is set by the 'url' metadata property on a release or the metadata - returned by the update API and should be displayed as a link in - user interfaces. The URL field may not be set for test or nightly - releases. - type: string - version: - description: version is a semantic versioning identifying the update - version. When this field is part of spec, version is optional - if image is specified. + description: Release represents an OpenShift release image and associated metadata. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. 
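Building on the overrides schema above, marking one component unmanaged looks something like the following (the component, name, and namespace are purely illustrative):

apiVersion: config.openshift.io/v1
kind: ClusterVersion
metadata:
  name: version
spec:
  clusterID: 00000000-0000-0000-0000-000000000000   # placeholder UUID
  overrides:
  - group: apps                   # API group the kind belongs to
    kind: Deployment
    name: example-operator        # hypothetical component name
    namespace: openshift-example  # hypothetical namespace; empty for cluster-scoped kinds
    unmanaged: true               # stop the CVO from creating or updating this object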
+ type: string + nullable: true + conditions: + description: conditions provides information about the cluster version. The condition "Available" is set to true if the desiredUpdate has been reached. The condition "Progressing" is set to true if an update is being applied. The condition "Degraded" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation. + type: array + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + desired: + description: desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + history: + description: history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved. + type: array + items: + description: UpdateHistory is a single attempted update to the cluster. + type: object + required: + - completionTime + - image + - startedTime + - state + - verified + properties: + completionTime: + description: completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update). 
+ type: string + format: date-time + nullable: true + image: + description: image is a container image location that contains the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was started. + type: string + format: date-time + state: + description: state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied). + type: string + verified: + description: verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. + type: boolean + version: + description: version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty. + type: string + observedGeneration: + description: observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version. + type: integer + format: int64 + versionHash: + description: versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only. type: string - history: - description: history contains a list of the most recent versions applied - to the cluster. This value may be empty during cluster startup, and - then will be updated when a new update is being applied. The newest - update is first in the list and it is ordered by recency. Updates - in the history have state Completed if the rollout completed - if - an update was failing or halfway applied the state will be Partial. - Only a limited amount of update history is preserved. - type: array - items: - description: UpdateHistory is a single attempted update to the cluster. - type: object - required: - - completionTime - - image - - startedTime - - state - - verified - properties: - completionTime: - description: completionTime, if set, is when the update was fully - applied. The update that is currently being applied will have - a null completion time. Completion time will always be set for - entries that are not the current update (usually to the started - time of the next update). - type: string - format: date-time - nullable: true - image: - description: image is a container image location that contains - the update. This value is always populated. - type: string - startedTime: - description: startedTime is the time at which the update was started. - type: string - format: date-time - state: - description: state reflects whether the update was fully applied. - The Partial state indicates the update is not fully applied, - while the Completed state indicates the update was successfully - rolled out at least once (all parts of the update successfully - applied). - type: string - verified: - description: verified indicates whether the provided update was - properly verified before it was installed. If this is false - the cluster may not be trusted. - type: boolean - version: - description: version is a semantic versioning identifying the - update version. If the requested image does not define a version, - or if a failure occurs retrieving the image, this value may - be empty. 
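Under this schema, a reported history entry might look like the following illustrative snippet (timestamps, image digest, and version are invented):

status:
  history:
  - state: Completed                             # Partial while a rollout is in flight
    startedTime: "2021-03-01T10:00:00Z"          # illustrative timestamp
    completionTime: "2021-03-01T11:00:00Z"       # null only for the in-progress entry
    image: quay.io/openshift-release-dev/ocp-release@sha256:...   # always populated; digest elided here
    version: 4.7.0                               # may be empty if the image defines no version
    verified: true                               # false means the update was not verified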
- type: string - observedGeneration: - description: observedGeneration reports which version of the spec is - being synced. If this value is not equal to metadata.generation, then - the desired and conditions fields may represent a previous version. - type: integer - format: int64 - versionHash: - description: versionHash is a fingerprint of the content that the cluster - will be updated with. It is used by the operator to avoid unnecessary - work and is for internal use only. - type: string - versions: - - name: v1 - served: true - storage: true + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml index 18e6187e6e..8b206f68e7 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml @@ -1,11 +1,12 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: operatorhubs.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: operatorhubs.config.openshift.io spec: group: config.openshift.io names: @@ -14,92 +15,69 @@ spec: plural: operatorhubs singular: operatorhub scope: Cluster - preserveUnknownFields: false - subresources: - status: {} - version: v1 versions: - - name: v1 - served: true - storage: true - "validation": - "openAPIV3Schema": - description: OperatorHub is the Schema for the operatorhubs API. It can be used - to change the state of the default hub sources for OperatorHub on the cluster - from enabled to disabled and vice versa. - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: OperatorHubSpec defines the desired state of OperatorHub - type: object - properties: - disableAllDefaultSources: - description: disableAllDefaultSources allows you to disable all the - default hub sources. If this is true, a specific entry in sources - can be used to enable a default source. If this is false, a specific - entry in sources can be used to disable or enable a default source. - type: boolean - sources: - description: sources is the list of default hub sources and their configuration. - If the list is empty, it implies that the default hub sources are - enabled on the cluster unless disableAllDefaultSources is true. If - disableAllDefaultSources is true and sources is not empty, the configuration - present in sources will take precedence. 
The list of default hub sources - and their current state will always be reflected in the status block. - type: array - items: - description: HubSource is used to specify the hub source and its configuration - type: object - properties: - disabled: - description: disabled is used to disable a default hub source - on cluster - type: boolean - name: - description: name is the name of one of the default hub sources - type: string - maxLength: 253 - minLength: 1 - status: - description: OperatorHubStatus defines the observed state of OperatorHub. - The current state of the default hub sources will always be reflected - here. + - name: v1 + schema: + openAPIV3Schema: + description: OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa. type: object properties: - sources: - description: sources encapsulates the result of applying the configuration - for each hub source - type: array - items: - description: HubSourceStatus is used to reflect the current state - of applying the configuration to a default source - type: object - properties: - disabled: - description: disabled is used to disable a default hub source - on cluster - type: boolean - message: - description: message provides more information regarding failures - type: string - name: - description: name is the name of one of the default hub sources - type: string - maxLength: 253 - minLength: 1 - status: - description: status indicates success or failure in applying the - configuration - type: string + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperatorHubSpec defines the desired state of OperatorHub + type: object + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block. 
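Per the sources semantics spelled out above, disabling every default source and then selectively re-enabling a single one could be expressed as:

apiVersion: config.openshift.io/v1
kind: OperatorHub
metadata:
  name: cluster                 # the canonical OperatorHub object
spec:
  disableAllDefaultSources: true
  sources:
  - name: redhat-operators      # one of the default hub sources
    disabled: false             # this entry takes precedence and re-enables the source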
+ type: array + items: + description: HubSource is used to specify the hub source and its configuration + type: object + properties: + disabled: + description: disabled is used to disable a default hub source on cluster + type: boolean + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here. + type: object + properties: + sources: + description: sources encapsulates the result of applying the configuration for each hub source + type: array + items: + description: HubSourceStatus is used to reflect the current state of applying the configuration to a default source + type: object + properties: + disabled: + description: disabled is used to disable a default hub source on cluster + type: boolean + message: + description: message provides more information regarding failures + type: string + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: status indicates success or failure in applying the configuration + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml index ddd5d700de..c20fb10893 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml @@ -1,103 +1,78 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: proxies.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: proxies.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Proxy listKind: ProxyList plural: proxies singular: proxy + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: Proxy holds cluster-wide information on how to configure default - proxies for the cluster. The canonical name is `cluster` - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec holds user-settable values for the proxy configuration - type: object - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. Empty - means unset and will not result in an env var. 
- type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS requests. Empty - means unset and will not result in an env var. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames and/or - CIDRs for which the proxy should not be used. Empty means unset - and will not result in an env var. - type: string - readinessEndpoints: - description: readinessEndpoints is a list of endpoints used to verify - readiness of the proxy. - type: array - items: + - name: v1 + schema: + openAPIV3Schema: + description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the proxy configuration + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. type: string - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing a - CA certificate bundle. The trustedCA field should only be consumed - by a proxy validator. The validator is responsible for reading the - certificate bundle from the required key \"ca-bundle.crt\", merging - it with the system default trust bundle, and writing the merged - trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. Clients that expect to make proxy connections must use - the trusted-ca-bundle for all HTTPS requests to the proxy, and may - use the trusted-ca-bundle for non-proxy HTTPS requests as well. - \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". - Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: - ConfigMap metadata: name: user-ca-bundle namespace: openshift-config - \ data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom - CA certificate bundle. -----END CERTIFICATE-----" - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config - map + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: type: string - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. 
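A populated cluster proxy matching this spec might look like the sketch below (the proxy URLs and noProxy entries are placeholders; the referenced ConfigMap must live in openshift-config, as the trustedCA description later in this hunk spells out):

apiVersion: config.openshift.io/v1
kind: Proxy
metadata:
  name: cluster                               # canonical name per the description above
spec:
  httpProxy: http://proxy.example.com:3128    # placeholder URL
  httpsProxy: http://proxy.example.com:3128   # placeholder URL
  noProxy: .cluster.local,.svc,10.0.0.0/16    # placeholder hostnames/CIDRs
  readinessEndpoints:
  - http://www.example.com                    # placeholder readiness check target
  trustedCA:
    name: user-ca-bundle                      # ConfigMap with required key ca-bundle.crt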
- type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS requests. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames and/or - CIDRs for which the proxy should not be used. - type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml index bd730570ca..d28dc858b0 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml @@ -1,260 +1,177 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: apiservers.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: apiservers.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: APIServer - singular: apiserver - plural: apiservers listKind: APIServerList + plural: apiservers + singular: apiserver + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - "openAPIV3Schema": - description: APIServer holds configuration (like serving certificates, client - CA and CORS domains) shared by all API servers in the system, among them - especially kube-apiserver and openshift-apiserver. The canonical name of - an instance is 'cluster'. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - additionalCORSAllowedOrigins: - description: additionalCORSAllowedOrigins lists additional, user-defined - regular expressions describing hosts for which the API server allows - access using the CORS headers. This may be needed to access the - API and the integrated OAuth server from JavaScript applications. - The values are regular expressions that correspond to the Golang - regular expression language. - type: array - items: - type: string - audit: - description: audit specifies the settings for audit configuration - to be applied to all OpenShift-provided API servers in the cluster. - type: object - default: - profile: Default - properties: - profile: - description: "profile specifies the name of the desired audit - policy configuration to be deployed to all OpenShift-provided - API servers in the cluster. \n The following profiles are provided: - - Default: the existing default policy. - WriteRequestBodies: - like 'Default', but logs request and response HTTP payloads - for write requests (create, update, patch). - AllRequestBodies: - like 'WriteRequestBodies', but also logs request and response - HTTP payloads for read requests (get, list). \n If unset, the - 'Default' profile is used as the default." - type: string - default: Default - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - clientCA: - description: 'clientCA references a ConfigMap containing a certificate - bundle for the signers that will be recognized for incoming client - certificates in addition to the operator managed signers. If this - is empty, then only operator managed signers are valid. You usually - only have to set this if you have your own PKI you wish to honor - client certificates from. The ConfigMap must exist in the openshift-config - namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - - CA bundle.' - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config - map + - name: v1 + schema: + openAPIV3Schema: + description: APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language. + type: array + items: type: string - encryption: - description: encryption allows the configuration of encryption of - resources at the datastore layer. - type: object - properties: - type: - description: "type defines what encryption type should be used - to encrypt resources at the datastore layer. When this field - is unset (i.e. when it is set to the empty string), identity - is implied. The behavior of unset can and will change over time. - \ Even if encryption is enabled by default, the meaning of unset - may change to a different encryption type based on changes in - best practices. \n When encryption is enabled, all sensitive - resources shipped with the platform are encrypted. This list - of sensitive resources can and will change over time. The current - authoritative list is: \n 1. secrets 2. configmaps 3. - routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io - \ 5. oauthauthorizetokens.oauth.openshift.io" - type: string - enum: - - "" - - identity - - aescbc - servingCerts: - description: servingCert is the TLS cert info for serving secure traffic. - If not specified, operator managed certificates will be used for - serving secure traffic. - type: object - properties: - namedCertificates: - description: namedCertificates references secrets containing the - TLS cert info for serving secure traffic to specific hostnames. - If no named certificates are provided, or no named certificates - match the server name as understood by a client, the defaultServingCertificate - will be used. - type: array - items: - description: APIServerNamedServingCert maps a server DNS name, - as understood by a client, to a certificate. + audit: + description: audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. + type: object + default: + profile: Default + properties: + customRules: + description: customRules specify profiles per group. These profile take precedence over the top-level profile field if they apply. They are evaluation from top to bottom and the first one that matches, applies. + type: array + items: + description: AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. + type: object + required: + - group + - profile + properties: + group: + description: group is a name of group a request user must be member of in order to this profile to apply. + type: string + minLength: 1 + profile: + description: "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster. \n The following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). 
- AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n If unset, the 'Default' profile is used as the default." + type: string + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + x-kubernetes-list-map-keys: + - group + x-kubernetes-list-type: map + profile: + description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: to raise a Red Hat support request, it is required to set this to Default, WriteRequestBodies, or AllRequestBodies to generate audit log events that can be analyzed by support. \n If unset, the 'Default' profile is used as the default." + type: string + default: Default + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + clientCA: + description: 'clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + encryption: + description: encryption allows the configuration of encryption of resources at the datastore layer. + type: object + properties: + type: + description: "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices. \n When encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" + type: string + enum: + - "" + - identity + - aescbc + servingCerts: + description: servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. 
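The new customRules list composes with the top-level profile as described above; a minimal sketch, with an illustrative group name:

apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  audit:
    profile: Default                        # applies to requests no custom rule matches
    customRules:                            # evaluated top to bottom; first match applies
    - group: system:authenticated:oauth     # illustrative group name
      profile: WriteRequestBodies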
+ type: object + properties: + namedCertificates: + description: namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used. + type: array + items: + description: APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. + type: object + properties: + names: + description: names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. + type: array + items: + type: string + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" type: object properties: - names: - description: names is a optional list of explicit DNS names - (leading wildcards allowed) that should use this certificate - to serve secure traffic. If no names are provided, the - implicit names will be extracted from the certificates. - Exact names trump over wildcard names. Explicit names - defined here trump over extracted implicit names. + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" type: array items: type: string - servingCertificate: - description: 'servingCertificate references a kubernetes.io/tls - type secret containing the TLS cert info for serving secure - traffic. The secret must exist in the openshift-config - namespace and contain the following required fields: - - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - - TLS certificate.' - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections - for externally exposed servers. \n If unset, a default (which may - change between releases) is chosen. 
Note that only Old and Intermediate - profiles are currently supported, and the maximum available MinTLSVersions - is VersionTLS12." - type: object - properties: - custom: - description: "custom is a user-defined TLS security profile. Be - extremely careful using a custom profile as invalid configurations - can be catastrophic. An example custom profile looks like this: - \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - \ minTLSVersion: TLSv1.1" - type: object - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators - may remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - type: array - items: + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" type: string - minTLSVersion: - description: "minTLSVersion is used to specify the minimal - version of the TLS protocol that is negotiated during the - TLS handshake. For example, to use TLS versions 1.1, 1.2 - and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently - the highest minTLSVersion allowed is VersionTLS12" - type: string - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - nullable: true - intermediate: - description: "intermediate is a TLS security profile based on: - \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - \ minTLSVersion: TLSv1.2" - type: object - nullable: true - modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
- type: object - nullable: true - old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - \ - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - \ - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - \ - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - \ - AES128-SHA256 - AES256-SHA256 - AES128-SHA - - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" - type: object - nullable: true - type: - description: "type is one of Old, Intermediate, Modern or Custom. - Custom provides the ability to specify individual TLS security - profile parameters. Old, Intermediate and Modern are TLS security - profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations - \n The profiles are intent based, so they may change over time - as new ciphers are developed and existing ciphers are found - to be insecure. Depending on precisely which ciphers are available - to a process, the list may be reduced. \n Note that the Modern - profile is currently not supported because it is not yet well - adopted by common software libraries." - type: string - enum: - - Old - - Intermediate - - Modern - - Custom - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
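Choosing one of these profiles on the APIServer singleton is a one-field change; the custom sketch below reuses the example cipher list from the description above (note the minTLSVersion enum expects VersionTLS* values, e.g. VersionTLS11 rather than TLSv1.1):

apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  tlsSecurityProfile:
    type: Custom                 # or Old / Intermediate; Modern is noted as unsupported
    custom:
      ciphers:
      - ECDHE-ECDSA-CHACHA20-POLY1305
      - ECDHE-RSA-CHACHA20-POLY1305
      - ECDHE-RSA-AES128-GCM-SHA256
      - ECDHE-ECDSA-AES128-GCM-SHA256
      minTLSVersion: VersionTLS11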
+ type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." + type: string + enum: + - Old + - Intermediate + - Modern + - Custom + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml index b90d578f3e..736ea3af01 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -1,161 +1,101 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: authentications.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: authentications.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Authentication listKind: AuthenticationList plural: authentications singular: authentication + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will result in the invalidation of - all bound tokens with the previous issuer value. Unless the holder - of a bound token has explicit support for a change in issuer, they - will not request a new bound token until pod restart or until their - existing token exceeds 80% of its duration.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - type: string - webhookTokenAuthenticator: - description: webhookTokenAuthenticator configures a remote token reviewer. - These remote authentication webhooks can be used to verify bearer - tokens via the tokenreviews.authentication.k8s.io REST API. This - is required to honor bearer tokens that are provisioned by an external - authentication service. - type: object - required: - - kubeConfig - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored." - type: object - required: + - name: v1 + schema: + openAPIV3Schema: + description: Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. 
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' + type: object + required: - name - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - type: array - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator. It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will result in the invalidation of all bound tokens with the previous issuer value. Unless the holder of a bound token has explicit support for a change in issuer, they will not request a new bound token until pod restart or until their existing token exceeds 80% of its duration.' + type: string + type: + description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. type: object + required: + - kubeConfig properties: kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. 
For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' + description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." type: object required: - - name + - name properties: name: - description: name is the metadata.name of the referenced - secret + description: name is the metadata.name of the referenced secret type: string - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + type: array + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. 
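
For reference, the spec fields above (oauthMetadata, serviceAccountIssuer, type, webhookTokenAuthenticator) combine into the cluster-scoped Authentication resource; the status stanza closes in the next hunk. A minimal sketch wiring up a remote token reviewer, with an invented secret name; per the schema, the Secret must live in openshift-config and carry a "kubeConfig" key:

apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: IntegratedOAuth
  webhookTokenAuthenticator:
    kubeConfig:
      name: webhook-authentication   # invented name; a Secret in openshift-config
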
+ type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml index 7a71db46ad..3055b2b805 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -1,392 +1,271 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: builds.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: builds.config.openshift.io spec: group: config.openshift.io - scope: Cluster - preserveUnknownFields: false names: kind: Build - singular: build - plural: builds listKind: BuildList + plural: builds + singular: build + preserveUnknownFields: false + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - "validation": - "openAPIV3Schema": - description: "Build configures the behavior of OpenShift builds for the entire - cluster. This includes default settings that can be overridden in BuildConfig - objects, and overrides which are applied to all builds. \n The canonical name - is \"cluster\"" - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec holds user-settable values for the build controller configuration + - name: v1 + schema: + openAPIV3Schema: + description: "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. \n The canonical name is \"cluster\"" type: object + required: + - spec properties: - additionalTrustedCA: - description: "AdditionalTrustedCA is a reference to a ConfigMap containing - additional CAs that should be trusted for image pushes and pulls during - builds. The namespace for this config map is openshift-config. \n - DEPRECATED: Additional CAs for image pull and push should be set on - image.config.openshift.io/cluster instead." + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - buildDefaults: - description: BuildDefaults controls the default information for Builds + spec: + description: Spec holds user-settable values for the build controller configuration type: object properties: - defaultProxy: - description: "DefaultProxy contains the default proxy settings for - all build operations, including image pull/push and source download. - \n Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, - and `NO_PROXY` environment variables in the build config's strategy." + additionalTrustedCA: + description: "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config. \n DEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead." type: object + required: + - name properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. Empty - means unset and will not result in an env var. + name: + description: name is the metadata.name of the referenced config map type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS requests. Empty - means unset and will not result in an env var. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames - and/or CIDRs for which the proxy should not be used. Empty - means unset and will not result in an env var. - type: string - readinessEndpoints: - description: readinessEndpoints is a list of endpoints used - to verify readiness of the proxy. - type: array - items: - type: string - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing - a CA certificate bundle. 
The trustedCA field should only be - consumed by a proxy validator. The validator is responsible - for reading the certificate bundle from the required key \"ca-bundle.crt\", - merging it with the system default trust bundle, and writing - the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" - in the \"openshift-config-managed\" namespace. Clients that - expect to make proxy connections must use the trusted-ca-bundle - for all HTTPS requests to the proxy, and may use the trusted-ca-bundle - for non-proxy HTTPS requests as well. \n The namespace for - the ConfigMap referenced by trustedCA is \"openshift-config\". - Here is an example ConfigMap (in yaml): \n apiVersion: v1 - kind: ConfigMap metadata: name: user-ca-bundle namespace: - openshift-config data: ca-bundle.crt: | -----BEGIN - CERTIFICATE----- Custom CA certificate bundle. -----END - CERTIFICATE-----" + buildDefaults: + description: BuildDefaults controls the default information for Builds + type: object + properties: + defaultProxy: + description: "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download. \n Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy." type: object - required: - - name properties: - name: - description: name is the metadata.name of the referenced - config map + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. type: string - env: - description: Env is a set of default environment variables that - will be applied to the build if the specified variables do not - exist on the build - type: array - items: - description: EnvVar represents an environment variable present - in a Container. - type: object - required: - - name - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a variable - cannot be resolved, the reference in the input string will - be unchanged. The $(VAR_NAME) syntax can be escaped with - a double $$, ie: $$(VAR_NAME). Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. 
Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + env: + description: Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build + type: array + items: + description: EnvVar represents an environment variable present in a Container. type: object + required: + - name properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - type: object - required: - - key - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - type: object - required: - - fieldPath - properties: - apiVersion: - description: Version of the schema the FieldPath is - written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified - API version. - type: string - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - type: object - required: - - resource - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the exposed - resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - secretKeyRef: - description: Selects a key of a secret in the pod's namespace + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. 
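
For reference, the buildDefaults fields above map onto the cluster Build config roughly as in this sketch (the valueFrom variants of env continue in the next hunk); the proxy endpoint and ConfigMap name are invented:

apiVersion: config.openshift.io/v1
kind: Build
metadata:
  name: cluster
spec:
  buildDefaults:
    defaultProxy:
      httpProxy: http://proxy.example.com:3128    # invented endpoint
      httpsProxy: http://proxy.example.com:3128
      noProxy: .cluster.local,.svc
      trustedCA:
        name: user-ca-bundle    # a ConfigMap in openshift-config, per the description above
    env:
      - name: GIT_SSL_NO_VERIFY    # plain-value EnvVar; the valueFrom sources follow below
        value: "false"
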
                            type: object
                            properties:
+                              configMapKeyRef:
+                                description: Selects a key of a ConfigMap.
+                                type: object
+                                required:
+                                  - key
+                                properties:
+                                  key:
+                                    description: The key to select.
+                                    type: string
+                                  name:
+                                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                                    type: string
+                                  optional:
+                                    description: Specify whether the ConfigMap or its key must be defined
+                                    type: boolean
+                              fieldRef:
+                                description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`, `metadata.annotations[''<KEY>'']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.'
+                                type: object
+                                required:
+                                  - fieldPath
+                                properties:
+                                  apiVersion:
+                                    description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+                                    type: string
+                                  fieldPath:
+                                    description: Path of the field to select in the specified API version.
+                                    type: string
+                              resourceFieldRef:
+                                description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.'
+                                type: object
+                                required:
+                                  - resource
+                                properties:
+                                  containerName:
+                                    description: 'Container name: required for volumes, optional for env vars'
+                                    type: string
+                                  divisor:
+                                    description: Specifies the output format of the exposed resources, defaults to "1"
+                                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                    anyOf:
+                                      - type: integer
+                                      - type: string
+                                    x-kubernetes-int-or-string: true
+                                  resource:
+                                    description: 'Required: resource to select'
+                                    type: string
+                              secretKeyRef:
+                                description: Selects a key of a secret in the pod's namespace
+                                type: object
+                                required:
+                                  - key
+                                properties:
+                                  key:
+                                    description: The key of the secret to select from. Must be a valid secret key.
+                                    type: string
+                                  name:
+                                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + gitProxy: + description: "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone. \n Values that are not set here will be inherited from DefaultProxy." + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + imageLabels: + description: ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig. type: array items: - type: string - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing - a CA certificate bundle. The trustedCA field should only be - consumed by a proxy validator. The validator is responsible - for reading the certificate bundle from the required key \"ca-bundle.crt\", - merging it with the system default trust bundle, and writing - the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" - in the \"openshift-config-managed\" namespace. Clients that - expect to make proxy connections must use the trusted-ca-bundle - for all HTTPS requests to the proxy, and may use the trusted-ca-bundle - for non-proxy HTTPS requests as well. \n The namespace for - the ConfigMap referenced by trustedCA is \"openshift-config\". - Here is an example ConfigMap (in yaml): \n apiVersion: v1 - kind: ConfigMap metadata: name: user-ca-bundle namespace: - openshift-config data: ca-bundle.crt: | -----BEGIN - CERTIFICATE----- Custom CA certificate bundle. -----END - CERTIFICATE-----" + type: object + properties: + name: + description: Name defines the name of the label. It must have non-zero length. 
+ type: string + value: + description: Value defines the literal value of the label. + type: string + resources: + description: Resources defines resource requirements to execute the build. type: object - required: - - name properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - imageLabels: - description: ImageLabels is a list of docker labels that are applied - to the resulting image. User can override a default label by providing - a label with the same name in their Build/BuildConfig. - type: array - items: - type: object - properties: - name: - description: Name defines the name of the label. It must have - non-zero length. - type: string - value: - description: Value defines the literal value of the label. - type: string - resources: - description: Resources defines resource requirements to execute - the build. + limits: + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + buildOverrides: + description: BuildOverrides controls override settings for builds type: object properties: - limits: - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - additionalProperties: - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - requests: - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + forcePull: + description: ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself + type: boolean + imageLabels: + description: ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten. + type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must have non-zero length. + type: string + value: + description: Value defines the literal value of the label. 
+                          type: string
+                  nodeSelector:
+                    description: NodeSelector is a selector which must be true for the build pod to fit on a node
+                    type: object
+                    additionalProperties:
-                  type: string
-              tolerations:
-                description: Tolerations is a list of Tolerations that will override
-                  any existing tolerations set on a build pod.
-                type: array
-                items:
-                  description: The pod this Toleration is attached to tolerates
-                    any taint that matches the triple <key,value,effect> using the
-                    matching operator <operator>.
-                  type: object
-                  properties:
-                    effect:
-                      description: Effect indicates the taint effect to match. Empty
-                        means match all taint effects. When specified, allowed values
-                        are NoSchedule, PreferNoSchedule and NoExecute.
-                      type: string
-                    key:
-                      description: Key is the taint key that the toleration applies
-                        to. Empty means match all taint keys. If the key is empty,
-                        operator must be Exists; this combination means to match
-                        all values and all keys.
-                      type: string
-                    operator:
-                      description: Operator represents a key's relationship to the
-                        value. Valid operators are Exists and Equal. Defaults to
-                        Equal. Exists is equivalent to wildcard for value, so that
-                        a pod can tolerate all taints of a particular category.
-                      type: string
-                    tolerationSeconds:
-                      description: TolerationSeconds represents the period of time
-                        the toleration (which must be of effect NoExecute, otherwise
-                        this field is ignored) tolerates the taint. By default,
-                        it is not set, which means tolerate the taint forever (do
-                        not evict). Zero and negative values will be treated as
-                        0 (evict immediately) by the system.
-                      type: integer
-                      format: int64
-                    value:
-                      description: Value is the taint value the toleration matches
-                        to. If the operator is Exists, the value should be empty,
-                        otherwise just a regular string.
                       type: string
+                  tolerations:
+                    description: Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.
+                    type: array
+                    items:
+                      description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+                      type: object
+                      properties:
+                        effect:
+                          description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
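
For reference, the buildOverrides stanza above (the remaining toleration fields continue in the next hunk) admits a config like the following sketch; the image label, node label, and taint key are invented:

apiVersion: config.openshift.io/v1
kind: Build
metadata:
  name: cluster
spec:
  buildOverrides:
    forcePull: true
    imageLabels:
      - name: io.example.team    # invented label; overwrites same-named user labels
        value: platform
    nodeSelector:
      node-role.kubernetes.io/builds: ""    # invented node label
    tolerations:
      - key: builds    # invented taint key
        operator: Exists
        effect: NoSchedule
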
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml index 69639c1d97..a07cd57329 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml @@ -1,74 +1,57 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consoles.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consoles.config.openshift.io spec: - scope: Cluster - preserveUnknownFields: false group: config.openshift.io names: kind: Console listKind: ConsoleList plural: consoles singular: console - subresources: - status: {} + scope: Cluster versions: - - name: v1 - served: true - storage: true - "validation": - "openAPIV3Schema": - description: Console holds cluster-wide configuration for the web console, including - the logout URL, and reports the public URL of the console. The canonical name - is `cluster`. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration + - name: v1 + schema: + openAPIV3Schema: + description: Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`. 
type: object + required: + - spec properties: - authentication: - description: ConsoleAuthentication defines a list of optional configuration - for console authentication. + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + authentication: + description: ConsoleAuthentication defines a list of optional configuration for console authentication. + type: object + properties: + logoutRedirect: + description: 'An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user''s token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.' + type: string + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ + status: + description: status holds observed values from the cluster. They may not be overridden. type: object properties: - logoutRedirect: - description: 'An optional, absolute URL to redirect web browsers - to after logging out of the console. If not specified, it will - redirect to the default login page. This is required when using - an identity provider that supports single sign-on (SSO) such as: - - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - - OAuth (GitHub, GitLab, Google) Logging out of the console will - destroy the user''s token. The logoutRedirect provides the user - the option to perform single logout (SLO) through the identity - provider to destroy their single sign-on session.' + consoleURL: + description: The URL for the console. This will be derived from the host for the route that is created for the console. type: string - pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - properties: - consoleURL: - description: The URL for the console. This will be derived from the - host for the route that is created for the console. 
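
For reference, the console spec above carries a single tunable; a minimal sketch with an invented SSO endpoint that satisfies the https-only pattern in the schema:

apiVersion: config.openshift.io/v1
kind: Console
metadata:
  name: cluster
spec:
  authentication:
    logoutRedirect: https://sso.example.com/logout   # invented URL
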
- type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml index 8e6f86222e..fde17e4d03 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml @@ -1,11 +1,12 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: dnses.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: dnses.config.openshift.io spec: group: config.openshift.io names: @@ -14,91 +15,58 @@ spec: plural: dnses singular: dns scope: Cluster - preserveUnknownFields: false versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - "validation": - "openAPIV3Schema": - description: DNS holds cluster-wide information about DNS. The canonical name - is `cluster` - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration + - name: v1 + schema: + openAPIV3Schema: + description: DNS holds cluster-wide information about DNS. The canonical name is `cluster` type: object + required: + - spec properties: - baseDomain: - description: "baseDomain is the base domain of the cluster. All managed - DNS records will be sub-domains of this base. \n For example, given - the base domain `openshift.example.com`, an API server DNS record - may be created for `cluster-api.openshift.example.com`. \n Once set, - this field cannot be changed." + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string - privateZone: - description: "privateZone is the location where all the DNS records - that are only available internally to the cluster exist. \n If this - field is nil, no private records should be created. \n Once set, this - field cannot be changed." + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: type: object - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as id - in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone - using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - additionalProperties: - type: string - publicZone: - description: "publicZone is the location where all the DNS records that - are publicly accessible to the internet exist. \n If this field is - nil, no public records should be created. \n Once set, this field - cannot be changed." + spec: + description: spec holds user settable values for configuration type: object properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as id - in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + baseDomain: + description: "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base. \n For example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`. \n Once set, this field cannot be changed." type: string - tags: - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone - using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + privateZone: + description: "privateZone is the location where all the DNS records that are only available internally to the cluster exist. \n If this field is nil, no private records should be created. \n Once set, this field cannot be changed." type: object - additionalProperties: - type: string - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. 
\n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + publicZone: + description: "publicZone is the location where all the DNS records that are publicly accessible to the internet exist. \n If this field is nil, no public records should be created. \n Once set, this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml index 8bba554b46..aabf4f63f4 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml @@ -1,78 +1,63 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: featuregates.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: featuregates.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: FeatureGate listKind: FeatureGateList plural: featuregates singular: featuregate + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: Feature holds cluster-wide information about feature gates. The - canonical name is `cluster` - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - customNoUpgrade: - description: customNoUpgrade allows the enabling or disabling of any - feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE - UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting - cannot be validated. If you have any typos or accidentally apply - invalid combinations your cluster may fail in an unrecoverable way. featureSet - must equal "CustomNoUpgrade" must be set to use this field. - type: object - properties: - disabled: - description: disabled is a list of all feature gates that you - want to force off - type: array - items: - type: string - enabled: - description: enabled is a list of all feature gates that you want - to force on - type: array - items: - type: string - nullable: true - featureSet: - description: featureSet changes the list of features in the cluster. The - default is empty. Be very careful adjusting this setting. Turning - on or off features may cause irreversible changes in your cluster - which cannot be undone. - type: string - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object + - name: v1 + schema: + openAPIV3Schema: + description: Feature holds cluster-wide information about feature gates. The canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + customNoUpgrade: + description: customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" must be set to use this field. + type: object + properties: + disabled: + description: disabled is a list of all feature gates that you want to force off + type: array + items: + type: string + enabled: + description: enabled is a list of all feature gates that you want to force on + type: array + items: + type: string + nullable: true + featureSet: + description: featureSet changes the list of features in the cluster. The default is empty. 
Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone. + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml index 35ed9bf174..ea1b6b36f8 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml @@ -1,161 +1,108 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: images.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: images.config.openshift.io spec: group: config.openshift.io - scope: Cluster - preserveUnknownFields: false names: kind: Image - singular: image - plural: images listKind: ImageList + plural: images + singular: image + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - "validation": - "openAPIV3Schema": - description: Image governs policies related to imagestream imports and runtime - configuration for external registries. It allows cluster admins to configure - which registries OpenShift is allowed to import images from, extra CA trust - bundles for external registries, and policies to block or allow registry hostnames. - When exposing OpenShift's image registry to the public, this also lets cluster - admins specify the external hostname. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration + - name: v1 + schema: + openAPIV3Schema: + description: Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname. 
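For reference, the reshaped FeatureGate schema above is exercised like this; a minimal sketch with hypothetical gate names (per the field description, the CustomNoUpgrade feature set is unsupported, prevents upgrades, and cannot be undone):

apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster                 # the canonical name per the schema description
spec:
  featureSet: CustomNoUpgrade   # must be set for customNoUpgrade to be honored
  customNoUpgrade:
    enabled:
    - ExampleAlphaFeature       # hypothetical gate name
    disabled:
    - ExampleBetaFeature        # hypothetical gate name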
type: object + required: + - spec properties: - additionalTrustedCA: - description: additionalTrustedCA is a reference to a ConfigMap containing - additional CAs that should be trusted during imagestream import, pod - image pull, build image pull, and imageregistry pullthrough. The namespace - for this config map is openshift-config. + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - allowedRegistriesForImport: - description: allowedRegistriesForImport limits the container image registries - that normal users may import images from. Set this list to the registries - that you trust to contain valid Docker images and that you want applications - to be able to import from. Users with permission to create Images - or ImageStreamMappings via the API are not affected by this policy - - typically only administrators or system integrations will have those - permissions. - type: array - items: - description: RegistryLocation contains a location of the registry - specified by the registry domain name. The domain name might include - wildcards, like '*' or '??'. - type: object - properties: - domainName: - description: domainName specifies a domain name for the registry - In case the registry use non-standard (80 or 443) port, the - port should be included in the domain name as well. - type: string - insecure: - description: insecure indicates whether the registry is secure - (https) or insecure (http) By default (if not specified) the - registry is assumed as secure. - type: boolean - externalRegistryHostnames: - description: externalRegistryHostnames provides the hostnames for the - default external image registry. The external hostname should be set - only when the image registry is exposed externally. The first value - is used in 'publicDockerImageRepository' field in ImageStreams. The - value must be in "hostname[:port]" format. - type: array - items: - type: string - registrySources: - description: registrySources contains configuration that determines - how the container runtime should treat individual registries when - accessing images for builds+pods. (e.g. whether or not to allow insecure - access). It does not contain configuration for the internal cluster - registry. + spec: + description: spec holds user settable values for configuration type: object properties: - allowedRegistries: - description: "allowedRegistries are the only registries permitted - for image pull and push actions. All other registries are denied. - \n Only one of BlockedRegistries or AllowedRegistries may be set." - type: array - items: - type: string - blockedRegistries: - description: "blockedRegistries cannot be used for image pull and - push actions. All other registries are permitted. \n Only one - of BlockedRegistries or AllowedRegistries may be set." 
+ additionalTrustedCA: + description: additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + allowedRegistriesForImport: + description: allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions. type: array items: - type: string - containerRuntimeSearchRegistries: - description: 'containerRuntimeSearchRegistries are registries that - will be searched when pulling images that do not have fully qualified - domains in their pull specs. Registries will be searched in the - order provided in the list. Note: this search list only works - with the container runtime, i.e CRI-O. Will NOT work with builds - or imagestream imports.' + description: RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'. + type: object + properties: + domainName: + description: domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well. + type: string + insecure: + description: insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure. + type: boolean + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. type: array - format: hostname - minItems: 1 items: type: string - x-kubernetes-list-type: set - insecureRegistries: - description: insecureRegistries are registries which do not have - a valid TLS certificates or only support HTTP connections. + registrySources: + description: registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry. + type: object + properties: + allowedRegistries: + description: "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. \n Only one of BlockedRegistries or AllowedRegistries may be set." + type: array + items: + type: string + blockedRegistries: + description: "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. \n Only one of BlockedRegistries or AllowedRegistries may be set." 
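The ConfigMap referenced by additionalTrustedCA lives in the openshift-config namespace. A minimal sketch of such a ConfigMap, assuming (this convention is not stated in the schema itself) that each data key is a registry hostname and each value a PEM CA bundle:

apiVersion: v1
kind: ConfigMap
metadata:
  name: registry-ca              # illustrative; referenced from Image spec.additionalTrustedCA.name
  namespace: openshift-config
data:
  registry.example.com: |        # illustrative registry hostname as the key
    -----BEGIN CERTIFICATE-----
    ...placeholder PEM data...
    -----END CERTIFICATE-----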
+ type: array + items: + type: string + containerRuntimeSearchRegistries: + description: 'containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e CRI-O. Will NOT work with builds or imagestream imports.' + type: array + format: hostname + minItems: 1 + items: + type: string + x-kubernetes-list-type: set + insecureRegistries: + description: insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. + type: array + items: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. type: array items: type: string - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - properties: - externalRegistryHostnames: - description: externalRegistryHostnames provides the hostnames for the - default external image registry. The external hostname should be set - only when the image registry is exposed externally. The first value - is used in 'publicDockerImageRepository' field in ImageStreams. The - value must be in "hostname[:port]" format. - type: array - items: - type: string - internalRegistryHostname: - description: internalRegistryHostname sets the hostname for the default - internal image registry. The value must be in "hostname[:port]" format. - This value is set by the image registry operator which controls the - internal registry hostname. For backward compatibility, users can - still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this - setting overrides the environment variable. - type: string + internalRegistryHostname: + description: internalRegistryHostname sets the hostname for the default internal image registry. The value must be in "hostname[:port]" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable. 
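A hypothetical Image config exercising the registry policy fields above (hostnames are illustrative, and the CA ConfigMap name matches the sketch earlier; note that registrySources allows only one of allowedRegistries or blockedRegistries):

apiVersion: config.openshift.io/v1
kind: Image
metadata:
  name: cluster                          # the cluster-scoped singleton, named cluster by convention
spec:
  additionalTrustedCA:
    name: registry-ca                    # ConfigMap in openshift-config
  allowedRegistriesForImport:
  - domainName: registry.example.com     # illustrative; append the port for non-standard ports
    insecure: false
  registrySources:
    insecureRegistries:
    - insecure.example.com:5000          # illustrative
    blockedRegistries:
    - blocked.example.com                # illustrative; mutually exclusive with allowedRegistries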
+ type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml index 212c1e21f5..ed97c441aa 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: infrastructures.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: infrastructures.config.openshift.io spec: group: config.openshift.io names: @@ -15,457 +16,137 @@ spec: singular: infrastructure scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: Infrastructure holds cluster-wide information about Infrastructure. The - canonical name is `cluster` - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - cloudConfig: - description: "cloudConfig is a reference to a ConfigMap containing - the cloud provider configuration file. This configuration file is - used to configure the Kubernetes cloud provider integration when - using the built-in cloud provider integration or the external cloud - controller manager. The namespace for this config map is openshift-config. - \n cloudConfig should only be consumed by the kube_cloud_config - controller. The controller is responsible for using the user configuration - in the spec for various platforms and combining that with the user - provided ConfigMap in this field to create a stitched kube cloud - config. The controller generates a ConfigMap `kube-cloud-config` - in `openshift-config-managed` namespace with the kube cloud config - is stored in `cloud.conf` key. All the clients are expected to use - the generated ConfigMap only." - type: object - properties: - key: - description: Key allows pointing to a specific key/value inside - of the configmap. This is useful for logical file references. - type: string - name: - type: string - platformSpec: - description: platformSpec holds desired information specific to the - underlying infrastructure provider. - type: object - properties: - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. 
- type: object - properties: - serviceEndpoints: - description: serviceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - type: array - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - type: object - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - type: string - pattern: ^[a-z0-9-]+$ - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - type: string - pattern: ^https:// - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - type: object - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - type: object - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - type: object - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - type: object - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - type: object - type: - description: type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", - "VSphere", "oVirt", "KubeVirt", "EquinixMetal", and "None". - Individual components may not support all platforms, and must - handle unrecognized platforms as None if they do not support - that platform. - type: string - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - properties: - apiServerInternalURI: - description: apiServerInternalURL is a valid URI with scheme 'https', - address and optionally a port (defaulting to 443). apiServerInternalURL - can be used by components like kubelets, to contact the Kubernetes - API server using the infrastructure provider rather than Kubernetes - networking. - type: string - apiServerURL: - description: apiServerURL is a valid URI with scheme 'https', address - and optionally a port (defaulting to 443). apiServerURL can be - used by components like the web console to tell users where to find - the Kubernetes API. 
- type: string - controlPlaneTopology: - description: controlPlaneTopology expresses the expectations for operands - that normally run on control nodes. The default is 'HighlyAvailable', - which represents the behavior operators have in a "normal" cluster. - The 'SingleReplica' mode will be used in single-node deployments - and the operators should not configure the operand for highly-available - operation - type: string - default: HighlyAvailable - enum: - - HighlyAvailable - - SingleReplica - etcdDiscoveryDomain: - description: 'etcdDiscoveryDomain is the domain used to fetch the - SRV records for discovering etcd servers and clients. For more info: - https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery - deprecated: as of 4.7, this field is no longer set or honored. It - will be removed in a future release.' - type: string - infrastructureName: - description: infrastructureName uniquely identifies a cluster with - a human friendly name. Once set it should not be changed. Must be - of max length 27 and must have only alphanumeric or hyphen characters. - type: string - infrastructureTopology: - description: infrastructureTopology expresses the expectations for - infrastructure services that do not run on control plane nodes, - usually indicated by a node selector for a `role` value other than - `master`. The default is 'HighlyAvailable', which represents the - behavior operators have in a "normal" cluster. The 'SingleReplica' - mode will be used in single-node deployments and the operators should - not configure the operand for highly-available operation - type: string - default: HighlyAvailable - enum: - - HighlyAvailable - - SingleReplica - platform: - description: "platform is the underlying infrastructure provider for - the cluster. \n Deprecated: Use platformStatus.type instead." - type: string - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - platformStatus: - description: platformStatus holds status information specific to the - underlying infrastructure provider. - type: object - properties: - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - type: object - properties: - region: - description: region holds the default AWS region for new AWS - resources created by the cluster. - type: string - serviceEndpoints: - description: ServiceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - type: array - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - type: object - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - type: string - pattern: ^[a-z0-9-]+$ - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - type: string - pattern: ^https:// - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. 
- type: object - properties: - cloudName: - description: cloudName is the name of the Azure cloud environment - which can be used to configure the Azure SDK with the appropriate - Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. - type: string - enum: + - name: v1 + schema: + openAPIV3Schema: + description: Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config. \n cloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. All the clients are expected to use the generated ConfigMap only." + type: object + properties: + key: + description: Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + type: string + name: + type: string + platformSpec: + description: platformSpec holds desired information specific to the underlying infrastructure provider. + type: object + properties: + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + type: object + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + type: array + items: + description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. + type: object + properties: + name: + description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. 
+ type: string + pattern: ^https:// + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + type: object + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + type: object + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + type: object + type: + description: type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + type: string + enum: - "" - - AzurePublicCloud - - AzureUSGovernmentCloud - - AzureChinaCloud - - AzureGermanCloud - networkResourceGroupName: - description: networkResourceGroupName is the Resource Group - for network resources like the Virtual Network and Subnets - used by the cluster. If empty, the value is same as ResourceGroupName. - type: string - resourceGroupName: - description: resourceGroupName is the Resource Group for new - Azure resources created for the cluster. - type: string - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - type: object - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for BareMetal deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. 
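Reading the new spec shape end to end, a hypothetical Infrastructure spec for an AWS cluster with a custom service endpoint and a user-supplied cloud config (ConfigMap name and endpoint URL are illustrative; in practice this object is created and managed by the installer):

apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster                    # the canonical name per the schema description
spec:
  cloudConfig:
    name: user-cloud-config        # illustrative ConfigMap in openshift-config
    key: cloud.conf
  platformSpec:
    type: AWS
    aws:
      serviceEndpoints:
      - name: ec2                                    # must match ^[a-z0-9-]+$
        url: https://vpce.ec2.example.amazonaws.com  # must match ^https://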
- type: object - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - type: object - properties: - projectID: - description: resourceGroupName is the Project ID for new GCP - resources created for the cluster. - type: string - region: - description: region holds the region for new GCP resources - created for the cluster. - type: string - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - type: object - properties: - location: - description: Location is where the cluster has been deployed - type: string - providerType: - description: ProviderType indicates the type of cluster that - was created - type: string - resourceGroupName: - description: ResourceGroupName is the Resource Group for new - IBMCloud resources created for the cluster. - type: string - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - type: object - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - type: object - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - cloudName: - description: cloudName is the name of the desired OpenStack - cloud in the client configuration file (`clouds.yaml`). - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for OpenStack deployments. 
- In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - type: object - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - nodeDNSIP: - description: 'deprecated: as of 4.6, this field is no longer - set or honored. It will be removed in a future release.' - type: string - type: - description: "type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", - \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", and - \"None\". Individual components may not support all platforms, - and must handle unrecognized platforms as None if they do not - support that platform. \n This value will be synced with to - the `status.platform` and `status.platformStatus.type`. Currently - this value cannot be changed once set." - type: string - enum: + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + apiServerInternalURI: + description: apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API. + type: string + controlPlaneTopology: + description: controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. 
+ type: string + default: HighlyAvailable + enum: + - HighlyAvailable + - SingleReplica + - External + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters. + type: string + infrastructureTopology: + description: 'infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is ''HighlyAvailable'', which represents the behavior operators have in a "normal" cluster. The ''SingleReplica'' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.' + type: string + default: HighlyAvailable + enum: + - HighlyAvailable + - SingleReplica + platform: + description: "platform is the underlying infrastructure provider for the cluster. \n Deprecated: Use platformStatus.type instead." + type: string + enum: - "" - AWS - Azure @@ -479,32 +160,197 @@ spec: - IBMCloud - KubeVirt - EquinixMetal - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - type: object - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for vSphere deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string + platformStatus: + description: platformStatus holds status information specific to the underlying infrastructure provider. + type: object + properties: + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + type: object + properties: + region: + description: region holds the default AWS region for new AWS resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. 
OpenShift reserves 25 tags for its use, leaving 25 tags available for the user. + type: array + maxItems: 25 + items: + description: AWSResourceTag is a tag to apply to AWS resources created for the cluster. + type: object + required: + - key + - value + properties: + key: + description: key is the key of the tag + type: string + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + value: + description: value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services. + type: string + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + serviceEndpoints: + description: ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + type: array + items: + description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. + type: object + properties: + name: + description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + pattern: ^https:// + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + type: object + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. + type: string + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. + type: string + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. 
In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + type: object + properties: + projectID: + description: resourceGroupName is the Project ID for new GCP resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources created for the cluster. + type: string + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + type: object + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + location: + description: Location is where the cluster has been deployed + type: string + providerType: + description: ProviderType indicates the type of cluster that was created + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + type: string + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + cloudName: + description: cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`). + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. 
The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.' + type: string + type: + description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. 
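Pulling the new status fields together: a hypothetical Infrastructure status fragment for an AWS cluster, showing the topology fields alongside the new resourceTags (all values illustrative; this block is populated by the installer and operators, sketched here only to show the shape):

status:
  infrastructureName: mycluster-x7k2p    # illustrative; max 27 chars, alphanumeric or hyphen
  apiServerURL: https://api.mycluster.example.com:6443
  apiServerInternalURI: https://api-int.mycluster.example.com:6443
  controlPlaneTopology: HighlyAvailable
  infrastructureTopology: HighlyAvailable
  platform: AWS                          # deprecated mirror of platformStatus.type
  platformStatus:
    type: AWS
    aws:
      region: us-east-1
      resourceTags:
      - key: cost-center                 # key and value must match ^[0-9A-Za-z_.:/=+-@]+$
        value: "12345"
      serviceEndpoints:
      - name: route53                    # must match ^[a-z0-9-]+$
        url: https://route53.example.amazonaws.com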
+ type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml index 25d1c5e98e..b45c74cdb7 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml @@ -1,11 +1,12 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: ingresses.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: ingresses.config.openshift.io spec: group: config.openshift.io names: @@ -14,54 +15,186 @@ spec: plural: ingresses singular: ingress scope: Cluster - preserveUnknownFields: false versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - "validation": - "openAPIV3Schema": - description: Ingress holds cluster-wide information about ingress, including - the default ingress domain used for routes. The canonical name is `cluster`. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration + - name: v1 + schema: + openAPIV3Schema: + description: Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`. type: object + required: + - spec properties: - appsDomain: - description: appsDomain is an optional domain to use instead of the - one specified in the domain field when a Route is created without - specifying an explicit host. If appsDomain is nonempty, this value - is used to generate default host values for Route. Unlike domain, - appsDomain may be modified after installation. This assumes a new - ingresscontroller has been setup with a wildcard certificate. + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string - domain: - description: "domain is used to generate a default host name for a route - when the route's host name is empty. The generated host name will - follow this pattern: \"..\". - \n It is also used as the default wildcard domain suffix for ingress. - The default ingresscontroller domain will follow this pattern: \"*.\". 
- \n Once set, changing domain is not currently supported." + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + appsDomain: + description: appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been setup with a wildcard certificate. + type: string + componentRoutes: + description: "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list. \n To determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes." + type: array + items: + description: ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. + type: object + required: + - hostname + - name + - namespace + properties: + hostname: + description: hostname is the hostname that should be used by the route. + type: string + format: hostname + name: + description: "name is the logical name of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." + type: string + maxLength: 256 + minLength: 1 + namespace: + description: "namespace is the namespace of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + servingCertKeyPairSecret: + description: servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + domain: + description: "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"..\". \n It is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.\". \n Once set, changing domain is not currently supported." + type: string + status: + description: status holds observed values from the cluster. 
They may not be overridden. + type: object + properties: + componentRoutes: + description: componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin. + type: array + items: + description: ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. + type: object + required: + - defaultHostname + - name + - namespace + - relatedObjects + properties: + conditions: + description: "conditions are used to communicate the state of the componentRoutes entry. \n Supported conditions include Available, Degraded and Progressing. \n If available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured. \n If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect. \n If Progressing is true, that means the component is taking some action related to the componentRoutes entry." + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + consumingUsers: + description: consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + type: array + maxItems: 5 + items: + description: ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. + type: string + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + currentHostnames: + description: currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + type: array + minItems: 1 + items: + description: Hostname is an alias for hostname string validation. + type: string + format: hostname + defaultHostname: + description: defaultHostname is the hostname of this route prior to customization. + type: string + format: hostname + name: + description: "name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." + type: string + maxLength: 256 + minLength: 1 + namespace: + description: "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + relatedObjects: + description: relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + type: array + minItems: 1 + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + type: object + required: + - group + - name + - resource + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. 
+ type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml index 7390943a12..5b17f211b3 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml @@ -1,11 +1,12 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: networks.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: networks.config.openshift.io spec: group: config.openshift.io names: @@ -13,143 +14,118 @@ spec: listKind: NetworkList plural: networks singular: network - scope: Cluster preserveUnknownFields: false + scope: Cluster versions: - - name: v1 - served: true - storage: true - "validation": - "openAPIV3Schema": - description: 'Network holds cluster-wide information about Network. The canonical - name is `cluster`. It is used to configure the desired network configuration, - such as: IP address pools for services/pod IPs, network plugin, etc. Please - view network.spec for an explanation on what applies when configuring this - resource.' - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration. As a general - rule, this SHOULD NOT be read directly. Instead, you should consume the - NetworkStatus, as it indicates the currently deployed configuration. Currently, - most spec fields are immutable after installation. Please view the individual - ones for further details on each. + - name: v1 + schema: + openAPIV3Schema: + description: 'Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.' type: object + required: + - spec properties: - clusterNetwork: - description: IP address pool to use for pod IPs. This field is immutable - after installation. - type: array - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - type: object - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each node. 
- If this field is not used by the plugin, it can be left unset. - type: integer - format: int32 - minimum: 0 - externalIP: - description: externalIP defines configuration for controllers that affect - Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. type: object properties: - autoAssignCIDRs: - description: autoAssignCIDRs is a list of CIDRs from which to automatically - assign Service.ExternalIP. These are assigned when the service - is of type LoadBalancer. In general, this is only useful for bare-metal - clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". - Automatically assigned External IPs are not affected by any ExternalIPPolicy - rules. Currently, only one entry may be provided. + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable after installation. type: array items: - type: string - policy: - description: policy is a set of restrictions applied to the ExternalIP - field. If nil or empty, then ExternalIP is not allowed to be set. + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + externalIP: + description: externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. type: object properties: - allowedCIDRs: - description: allowedCIDRs is the list of allowed CIDRs. + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided. type: array items: type: string - rejectedCIDRs: - description: rejectedCIDRs is the list of disallowed CIDRs. - These take precedence over allowedCIDRs. - type: array - items: + policy: + description: policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set. 
+ type: object + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + type: array + items: + type: string + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs. + type: array + items: + type: string + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation. + type: array + items: + type: string + serviceNodePortRange: + description: The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed. + type: string + pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. type: string - networkType: - description: 'NetworkType is the plugin that is to be deployed (e.g. - OpenShiftSDN). This should match a value that the cluster-network-operator - understands, or else no networking will be installed. Currently supported - values are: - OpenShiftSDN This field is immutable after installation.' - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. This field is immutable after installation. - type: array - items: - type: string - serviceNodePortRange: - description: The port range allowed for Services of type NodePort. If - not specified, the default of 30000-32767 will be used. Such Services - without a NodePort specified will have one automatically allocated - from this range. This parameter can be updated after the cluster is - installed. - type: string - pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. - type: array - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - type: object - properties: - cidr: - description: The complete block for pod IPs. + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. 
+ type: integer + migration: + description: Migration contains the cluster network migration configuration. + type: object + properties: + networkType: + description: 'NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes' + type: string + enum: + - OpenShiftSDN + - OVNKubernetes + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. + type: array + items: type: string - hostPrefix: - description: The size (prefix) of block to allocate to each node. - If this field is not used by the plugin, it can be left unset. - type: integer - format: int32 - minimum: 0 - clusterNetworkMTU: - description: ClusterNetworkMTU is the MTU for inter-pod networking. - type: integer - networkType: - description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. - type: array - items: - type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml index d3097b8745..61da741c4b 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml @@ -1,676 +1,432 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: oauths.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: oauths.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: OAuth listKind: OAuthList plural: oauths singular: oauth + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: OAuth holds cluster-wide information about OAuth. The canonical - name is `cluster`. It is used to configure the integrated OAuth server. - This configuration is only honored when the top level Authentication config - has type set to IntegratedOAuth. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - identityProviders: - description: identityProviders is an ordered list of ways for a user - to identify themselves. 
When this list is empty, no identities are - provisioned for users. - type: array - items: - description: IdentityProvider provides identities for users authenticating - using credentials - type: object - properties: - basicAuth: - description: basicAuth contains configuration options for the - BasicAuth IdP - type: object - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - tlsClientCert: - description: tlsClientCert is an optional reference to a - secret by name that contains the PEM-encoded TLS client - certificate to present when connecting to the server. - The key "tls.crt" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - tlsClientKey: - description: tlsClientKey is an optional reference to a - secret by name that contains the PEM-encoded TLS private - key for the client certificate referenced in tlsClientCert. - The key "tls.key" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - url: - description: url is the remote URL to connect to - type: string - github: - description: github enables user authentication using GitHub - credentials - type: object - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. This can only be configured when hostname is set - to a non-empty value. The namespace for this config map - is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - config map + - name: v1 + schema: + openAPIV3Schema: + description: OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. 
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + identityProviders: + description: identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users. + type: array + items: + description: IdentityProvider provides identities for users authenticating using credentials + type: object + properties: + basicAuth: + description: basicAuth contains configuration options for the BasicAuth IdP + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the remote URL to connect to + type: string + github: + description: github enables user authentication using GitHub credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. 
It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + hostname: + description: hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname. + type: string + organizations: + description: organizations optionally restricts which organizations are allowed to log in + type: array + items: type: string - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret + teams: + description: teams optionally restricts which teams are allowed to log in. Format is <org>/<team>. + type: array + items: type: string - hostname: - description: hostname is the optional domain (e.g. "mycompany.com") - for use with a hosted instance of GitHub Enterprise. It - must match the GitHub Enterprise settings value configured - at /setup/settings#hostname. - type: string - organizations: - description: organizations optionally restricts which organizations - are allowed to log in - type: array - items: + gitlab: + description: gitlab enables user authentication using GitLab credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + clientID: + description: clientID is the oauth client ID type: string - teams: - description: teams optionally restricts which teams are - allowed to log in. Format is <org>/<team>.
- type: array - items: + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the oauth server base URL type: string - gitlab: - description: gitlab enables user authentication using GitLab - credentials - type: object - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - config map + google: + description: google enables user authentication using Google credentials + type: object + properties: + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + hostedDomain: + description: hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + type: string + htpasswd: + description: htpasswd enables user authentication using an HTPasswd file to validate credentials + type: object + properties: + fileData: + description: fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key "htpasswd" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + keystone: + description: keystone enables user authentication using keystone password credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + domainName: + description: domainName is required for keystone v3 + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the remote URL to connect to + type: string + ldap: + description: ldap enables user authentication using LDAP credentials + type: object + properties: + attributes: + description: attributes maps LDAP attributes to identities + type: object + properties: + email: + description: email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + type: array + items: + type: string + id: + description: id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is "dn" + type: array + items: + type: string + name: + description: name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is "cn" + type: array + items: + type: string + preferredUsername: + description: preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is "uid" + type: array + items: + type: string + bindDN: + description: bindDN is an optional DN to bind with during the search phase. + type: string + bindPassword: + description: bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key "bindPassword" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. 
If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + insecure: + description: 'insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always attempt to connect using TLS, even when `insecure` is set to `true` When `true`, "ldap://" URLS connect insecurely. When `false`, "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.' + type: boolean + url: + description: 'url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter' + type: string + mappingMethod: + description: mappingMethod determines how identities from this provider are mapped to users Defaults to "claim" + type: string + name: + description: 'name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' + type: string + openID: + description: openID enables user authentication using OpenID credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + claims: + description: claims mappings + type: object + properties: + email: + description: email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + type: array + items: + type: string + name: + description: name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity + type: array + items: + type: string + preferredUsername: + description: preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim + type: array + items: + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + extraAuthorizeParameters: + description: extraAuthorizeParameters are any custom parameters to add to the authorize request. + type: object + additionalProperties: type: string - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret + extraScopes: + description: extraScopes are any scopes to request in addition to the standard "openid" scope. + type: array + items: type: string - url: - description: url is the oauth server base URL - type: string - google: - description: google enables user authentication using Google - credentials - type: object - properties: - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret + issuer: + description: issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component. + type: string + requestHeader: + description: requestHeader enables user authentication using request header credentials + type: object + properties: + ca: + description: ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key "ca.crt" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + challengeURL: + description: challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when challenge is set to true. + type: string + clientCommonNames: + description: clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + type: array + items: type: string - hostedDomain: - description: hostedDomain is the optional Google App domain - (e.g. 
"mycompany.com") to restrict logins to - type: string - htpasswd: - description: htpasswd enables user authentication using an HTPasswd - file to validate credentials - type: object - properties: - fileData: - description: fileData is a required reference to a secret - by name containing the data to use as the htpasswd file. - The key "htpasswd" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. If the specified htpasswd data is not - valid, the identity provider is not honored. The namespace - for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret + emailHeaders: + description: emailHeaders is the set of headers to check for the email address + type: array + items: type: string - keystone: - description: keystone enables user authentication using keystone - password credentials - type: object - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - config map + headers: + description: headers is the set of headers to check for identity information + type: array + items: type: string - domainName: - description: domainName is required for keystone v3 - type: string - tlsClientCert: - description: tlsClientCert is an optional reference to a - secret by name that contains the PEM-encoded TLS client - certificate to present when connecting to the server. - The key "tls.crt" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret + loginURL: + description: loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when login is set to true. + type: string + nameHeaders: + description: nameHeaders is the set of headers to check for the display name + type: array + items: type: string - tlsClientKey: - description: tlsClientKey is an optional reference to a - secret by name that contains the PEM-encoded TLS private - key for the client certificate referenced in tlsClientCert. - The key "tls.key" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. 
- The namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret + preferredUsernameHeaders: + description: preferredUsernameHeaders is the set of headers to check for the preferred username + type: array + items: type: string - url: - description: url is the remote URL to connect to - type: string - ldap: - description: ldap enables user authentication using LDAP credentials + type: + description: type identifies the identity provider type for this entry. + type: string + templates: + description: templates allow you to customize pages like the login page. + type: object + properties: + error: + description: error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key "errors.html" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config. type: object + required: + - name properties: - attributes: - description: attributes maps LDAP attributes to identities - type: object - properties: - email: - description: email is the list of attributes whose values - should be used as the email address. Optional. If - unspecified, no email is set for the identity - type: array - items: - type: string - id: - description: id is the list of attributes whose values - should be used as the user ID. Required. First non-empty - attribute is used. At least one attribute is required. - If none of the listed attribute have a value, authentication - fails. LDAP standard identity attribute is "dn" - type: array - items: - type: string - name: - description: name is the list of attributes whose values - should be used as the display name. Optional. If unspecified, - no display name is set for the identity LDAP standard - display name attribute is "cn" - type: array - items: - type: string - preferredUsername: - description: preferredUsername is the list of attributes - whose values should be used as the preferred username. - LDAP standard login attribute is "uid" - type: array - items: - type: string - bindDN: - description: bindDN is an optional DN to bind with during - the search phase. + name: + description: name is the metadata.name of the referenced secret type: string - bindPassword: - description: bindPassword is an optional reference to a - secret by name containing a password to bind with during - the search phase. The key "bindPassword" is used to locate - the data. If specified and the secret or expected key - is not found, the identity provider is not honored. The - namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. 
The namespace for this config map is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - insecure: - description: 'insecure, if true, indicates the connection - should not use TLS WARNING: Should not be set to `true` - with the URL scheme "ldaps://" as "ldaps://" URLs always attempt - to connect using TLS, even when `insecure` is set to `true` - When `true`, "ldap://" URLS connect insecurely. When `false`, - "ldap://" URLs are upgraded to a TLS connection using - StartTLS as specified in https://tools.ietf.org/html/rfc2830.' - type: boolean - url: - description: 'url is an RFC 2255 URL which specifies the - LDAP search parameters to use. The syntax of the URL is: - ldap://host:port/basedn?attribute?scope?filter' - type: string - mappingMethod: - description: mappingMethod determines how identities from this - provider are mapped to users Defaults to "claim" - type: string - name: - description: 'name is used to qualify the identities returned - by this provider. - It MUST be unique and not shared by any - other identity provider used - It MUST be a valid path segment: - name cannot equal "." or ".." or contain "/" or "%" or ":" Ref: - https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' - type: string - openID: - description: openID enables user authentication using OpenID - credentials + login: + description: login is the name of a secret that specifies a go template to use to render the login page. The key "login.html" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config. type: object + required: + - name properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - claims: - description: claims mappings - type: object - properties: - email: - description: email is the list of claims whose values - should be used as the email address. Optional. If - unspecified, no email is set for the identity - type: array - items: - type: string - name: - description: name is the list of claims whose values - should be used as the display name. Optional. If unspecified, - no display name is set for the identity - type: array - items: - type: string - preferredUsername: - description: preferredUsername is the list of claims - whose values should be used as the preferred username. 
- If unspecified, the preferred username is determined - from the value of the sub claim - type: array - items: - type: string - clientID: - description: clientID is the oauth client ID + name: + description: name is the metadata.name of the referenced secret type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - extraAuthorizeParameters: - description: extraAuthorizeParameters are any custom parameters - to add to the authorize request. - type: object - additionalProperties: - type: string - extraScopes: - description: extraScopes are any scopes to request in addition - to the standard "openid" scope. - type: array - items: - type: string - issuer: - description: issuer is the URL that the OpenID Provider - asserts as its Issuer Identifier. It must use the https - scheme with no query or fragment component. - type: string - requestHeader: - description: requestHeader enables user authentication using - request header credentials + providerSelection: + description: providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key "providers.html" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config. type: object + required: + - name properties: - ca: - description: ca is a required reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. Specifically, it allows verification - of incoming requests to prevent header spoofing. The key - "ca.crt" is used to locate the data. If the config map - or expected key is not found, the identity provider is - not honored. If the specified ca data is not valid, the - identity provider is not honored. The namespace for this - config map is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - challengeURL: - description: challengeURL is a URL to redirect unauthenticated - /authorize requests to Unauthenticated requests from OAuth - clients which expect WWW-Authenticate challenges will - be redirected here. ${url} is replaced with the current - URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} - ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} - Required when challenge is set to true. + name: + description: name is the metadata.name of the referenced secret type: string - clientCommonNames: - description: clientCommonNames is an optional list of common - names to require a match from. If empty, any client certificate - validated against the clientCA bundle is considered authoritative. 
- type: array - items: - type: string - emailHeaders: - description: emailHeaders is the set of headers to check - for the email address - type: array - items: - type: string - headers: - description: headers is the set of headers to check for - identity information - type: array - items: - type: string - loginURL: - description: loginURL is a URL to redirect unauthenticated - /authorize requests to Unauthenticated requests from OAuth - clients which expect interactive logins will be redirected - here ${url} is replaced with the current URL, escaped - to be safe in a query parameter https://www.example.com/sso-login?then=${url} - ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} - Required when login is set to true. - type: string - nameHeaders: - description: nameHeaders is the set of headers to check - for the display name - type: array - items: - type: string - preferredUsernameHeaders: - description: preferredUsernameHeaders is the set of headers - to check for the preferred username - type: array - items: - type: string - type: - description: type identifies the identity provider type for - this entry. + tokenConfig: + description: tokenConfig contains options for authorization and access tokens + type: object + properties: + accessTokenInactivityTimeout: + description: accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as "5m", "1.5h" or "2h45m". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime. type: string - templates: - description: templates allow you to customize pages like the login - page. - type: object - properties: - error: - description: error is the name of a secret that specifies a go - template to use to render error pages during the authentication - or grant flow. The key "errors.html" is used to locate the template - data. If specified and the secret or expected key is not found, - the default error page is used. If the specified template is - not valid, the default error page is used. If unspecified, the - default error page is used. The namespace for this secret is - openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - login: - description: login is the name of a secret that specifies a go - template to use to render the login page. The key "login.html" - is used to locate the template data. If specified and the secret - or expected key is not found, the default login page is used. - If the specified template is not valid, the default login page - is used. If unspecified, the default login page is used. The - namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - providerSelection: - description: providerSelection is the name of a secret that specifies - a go template to use to render the provider selection page. 
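For orientation, the relocated templates and the new accessTokenInactivityTimeout field combine as in the following minimal OAuth config sketch; the secret name and the 10m value are illustrative assumptions, not part of this change:

apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  templates:
    login:
      name: custom-login-template   # hypothetical secret in openshift-config; template data under the key "login.html"
  tokenConfig:
    accessTokenInactivityTimeout: 10m   # duration string; the minimum allowed value is 300s (5 minutes)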
- The key "providers.html" is used to locate the template data. - If specified and the secret or expected key is not found, the - default provider selection page is used. If the specified template - is not valid, the default provider selection page is used. If - unspecified, the default provider selection page is used. The - namespace for this secret is openshift-config. - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - tokenConfig: - description: tokenConfig contains options for authorization and access - tokens - type: object - properties: - accessTokenInactivityTimeout: - description: accessTokenInactivityTimeout defines the token inactivity - timeout for tokens granted by any client. The value represents - the maximum amount of time that can occur between consecutive - uses of the token. Tokens become invalid if they are not used - within this temporal window. The user will need to acquire a - new token to regain access once a token times out. Takes valid - time duration string such as "5m", "1.5h" or "2h45m". The minimum - allowed value for duration is 300s (5 minutes). If the timeout - is configured per client, then that value takes precedence. - If the timeout value is not specified and the client does not - override the value, then tokens are valid until their lifetime. - type: string - accessTokenInactivityTimeoutSeconds: - description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: - setting this field has no effect.' - type: integer - format: int32 - accessTokenMaxAgeSeconds: - description: accessTokenMaxAgeSeconds defines the maximum age - of access tokens - type: integer - format: int32 - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object + accessTokenInactivityTimeoutSeconds: + description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.' + type: integer + format: int32 + accessTokenMaxAgeSeconds: + description: accessTokenMaxAgeSeconds defines the maximum age of access tokens + type: integer + format: int32 + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml index 6de3040722..ccc0981e1a 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml @@ -1,66 +1,55 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: projects.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: projects.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Project listKind: ProjectList plural: projects singular: project + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: Project holds cluster-wide information about Project. 
The canonical - name is `cluster` - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - projectRequestMessage: - description: projectRequestMessage is the string presented to a user - if they are unable to request a project via the projectrequest api - endpoint - type: string - projectRequestTemplate: - description: projectRequestTemplate is the template to use for creating - projects in response to projectrequest. This must point to a template - in 'openshift-config' namespace. It is optional. If it is not specified, - a default template is used. - type: object - properties: - name: - description: name is the metadata.name of the referenced project - request template - type: string - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object + - name: v1 + schema: + openAPIV3Schema: + description: Project holds cluster-wide information about Project. The canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + projectRequestMessage: + description: projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + type: string + projectRequestTemplate: + description: projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used. + type: object + properties: + name: + description: name is the metadata.name of the referenced project request template + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. 
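Read back out of the reflowed schema, a Project config is small; a minimal sketch with placeholder values (the message text and template name are assumptions):

apiVersion: config.openshift.io/v1
kind: Project
metadata:
  name: cluster
spec:
  projectRequestMessage: "Ask an administrator to create a project for you."   # illustrative text
  projectRequestTemplate:
    name: project-request   # hypothetical template in the openshift-config namespace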
+ type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml index c66ec6ad95..86122ed06c 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml @@ -1,106 +1,68 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: schedulers.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: schedulers.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Scheduler - singular: scheduler - plural: schedulers listKind: SchedulerList + plural: schedulers + singular: scheduler + scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - description: Scheduler holds cluster-wide config information to run the Kubernetes - Scheduler and influence its placement decisions. The canonical name for - this config is `cluster`. - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - defaultNodeSelector: - description: 'defaultNodeSelector helps set the cluster-wide default - node selector to restrict pod placement to specific nodes. This - is applied to the pods created in all namespaces and creates an - intersection with any existing nodeSelectors already set on a pod, - additionally constraining that pod''s selector. For example, defaultNodeSelector: - "type=user-node,region=east" would set nodeSelector field in pod - spec to "type=user-node,region=east" to all pods created in all - namespaces. Namespaces having project-wide node selectors won''t - be impacted even if this field is set. This adds an annotation section - to the namespace. For example, if a new namespace is created with - node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: - type=user-node,region=east gets added to the project. When the openshift.io/node-selector - annotation is set on the project the value is used in preference - to the value we are setting for defaultNodeSelector field. For instance, - openshift.io/node-selector: "type=user-node,region=west" means that - the default of "type=user-node,region=east" set in defaultNodeSelector - would not be applied.' 
- type: string - mastersSchedulable: - description: 'MastersSchedulable allows masters nodes to be schedulable. - When this flag is turned on, all the master nodes in the cluster - will be made schedulable, so that workload pods can run on them. - The default value for this field is false, meaning none of the master - nodes are schedulable. Important Note: Once the workload pods start - running on the master nodes, extreme care must be taken to ensure - that cluster-critical control plane components are not impacted. - Please turn on this field after doing due diligence.' - type: boolean - policy: - description: 'DEPRECATED: the scheduler Policy API has been deprecated - and will be removed in a future release. policy is a reference to - a ConfigMap containing scheduler policy which has user specified - predicates and priorities. If this ConfigMap is not available scheduler - will default to use DefaultAlgorithmProvider. The namespace for - this configmap is openshift-config.' - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - profile: - description: "profile sets which scheduling profile should be set - in order to configure scheduling decisions for new pods. \n Valid - values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" - Defaults to \"LowNodeUtilization\"" - type: string - enum: - - "" - - LowNodeUtilization - - HighNodeUtilization - - NoScoring - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object + - name: v1 + schema: + openAPIV3Schema: + description: Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + defaultNodeSelector: + description: 'defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod''s selector. For example, defaultNodeSelector: "type=user-node,region=east" would set nodeSelector field in pod spec to "type=user-node,region=east" to all pods created in all namespaces. Namespaces having project-wide node selectors won''t be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. 
When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: "type=user-node,region=west" means that the default of "type=user-node,region=east" set in defaultNodeSelector would not be applied.' + type: string + mastersSchedulable: + description: 'MastersSchedulable allows master nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.' + type: boolean + policy: + description: 'DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + profile: + description: "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods. \n Valid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"" + type: string + enum: + - "" + - LowNodeUtilization + - HighNodeUtilization + - NoScoring + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 42268db39b..2e51930fa5 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -46,9 +46,9 @@ type APIServerSpec struct { Encryption APIServerEncryption `json:"encryption"` // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. // - // If unset, a default (which may change between releases) is chosen. Note that only Old and - // Intermediate profiles are currently supported, and the maximum available MinTLSVersions - // is VersionTLS12. + // If unset, a default (which may change between releases) is chosen. Note that only Old, + // Intermediate and Custom profiles are currently supported, and the maximum available + // MinTLSVersions is VersionTLS12. // +optional TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` // audit specifies the settings for audit configuration to be applied to all OpenShift-provided @@ -59,12 +59,15 @@ type APIServerSpec struct { } // AuditProfileType defines the audit policy profile type. -// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies +// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies;None type AuditProfileType string const ( + // "None" disables audit logs. + NoneAuditProfileType AuditProfileType = "None" + // "Default" is the existing default audit configuration policy.
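The reflowed Scheduler schema above maps onto a config like the following sketch; the values are illustrative (the selector string is borrowed from the field description):

apiVersion: config.openshift.io/v1
kind: Scheduler
metadata:
  name: cluster
spec:
  mastersSchedulable: false                         # default; master nodes stay unschedulable
  profile: HighNodeUtilization                      # one of "", LowNodeUtilization, HighNodeUtilization, NoScoring
  defaultNodeSelector: type=user-node,region=east   # intersected with each pod's own nodeSelector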
- AuditProfileDefaultType AuditProfileType = "Default" + DefaultAuditProfileType AuditProfileType = "Default" // "WriteRequestBodies" is similar to Default but it logs request and response // HTTP payloads for write requests (create, update, patch) @@ -76,6 +79,47 @@ const ( ) type Audit struct { + // profile specifies the name of the desired top-level audit profile to be applied to all requests + // sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, + // openshift-apiserver and oauth-apiserver), with the exception of those requests that match + // one or more of the customRules. + // + // The following profiles are provided: + // - Default: default policy which means MetaData level logging with the exception of events + // (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody + // level). + // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for + // write requests (create, update, patch). + // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response + // HTTP payloads for read requests (get, list). + // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. + // + // Warning: to raise a Red Hat support request, it is required to set this to Default, + // WriteRequestBodies, or AllRequestBodies to generate audit log events that can be + // analyzed by support. + // + // If unset, the 'Default' profile is used as the default. + // + // +kubebuilder:default=Default + Profile AuditProfileType `json:"profile,omitempty"` + // customRules specify profiles per group. These profiles take precedence over the + // top-level profile field if they apply. They are evaluated from top to bottom and + // the first one that matches applies. + // +listType=map + // +listMapKey=group + // +optional + CustomRules []AuditCustomRule `json:"customRules,omitempty"` +} + +// AuditCustomRule describes a custom rule for an audit profile that takes precedence over +// the top-level profile. +type AuditCustomRule struct { + // group is the name of a group a request user must be a member of in order for this profile to apply. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Group string `json:"group"` // profile specifies the name of the desired audit policy configuration to be deployed to // all OpenShift-provided API servers in the cluster. // @@ -85,9 +129,12 @@ type Audit struct { // write requests (create, update, patch). // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response // HTTP payloads for read requests (get, list). + // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. // // If unset, the 'Default' profile is used as the default. - // +kubebuilder:default=Default + // + // +kubebuilder:validation:Required + // +required Profile AuditProfileType `json:"profile,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go index 299adb1c9f..3339a8688c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -142,6 +142,8 @@ type ClusterStatusConditionType string const ( // Available indicates that the operand (eg: openshift-apiserver for the // openshift-apiserver-operator), is functional and available in the cluster.
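Putting the new profile and customRules fields together, an APIServer audit stanza would look roughly like the sketch below; the group name is an assumption chosen for illustration:

apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  audit:
    profile: WriteRequestBodies        # applies to all requests not matched by a custom rule
    customRules:
    - group: system:serviceaccounts    # hypothetical group; rules are evaluated top to bottom, first match wins
      profile: Default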
+ // Available=False means at least part of the component is non-functional, + // and that the condition requires immediate administrator intervention. OperatorAvailable ClusterStatusConditionType = "Available" // Progressing indicates that the operator is actively rolling out new code, @@ -162,13 +164,13 @@ const ( // persist over a long enough period to report Degraded. A service should not // report Degraded during the course of a normal upgrade. A service may report // Degraded in response to a persistent infrastructure failure that requires - // administrator intervention. For example, if a control plane host is unhealthy - // and must be replaced. An operator should report Degraded if unexpected - // errors occur over a period, but the expectation is that all unexpected errors - // are handled as operators mature. + // eventual administrator intervention. For example, if a control plane host + // is unhealthy and must be replaced. An operator should report Degraded if + // unexpected errors occur over a period, but the expectation is that all + // unexpected errors are handled as operators mature. OperatorDegraded ClusterStatusConditionType = "Degraded" - // Upgradeable indicates whether the operator is in a state that is safe to upgrade. When status is `False` + // Upgradeable indicates whether the operator is safe to upgrade based on the current cluster state. When status is `False` // administrators should not upgrade their cluster and the message field should contain a human readable description // of what the administrator should do to allow the operator to successfully update. A missing condition, True, // and Unknown are all treated by the CVO as allowing an upgrade. diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 7cb30c5d34..dba9aa00ba 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -105,7 +105,14 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ Enabled: []string{}, Disabled: []string{}, }, - TechPreviewNoUpgrade: newDefaultFeatures().toFeatures(), + TechPreviewNoUpgrade: newDefaultFeatures(). + with("CSIDriverAzureDisk"). // sig-storage, jsafrane, OCP specific + with("CSIDriverVSphere"). // sig-storage, jsafrane, OCP specific + with("CSIMigrationAWS"). // sig-storage, jsafrane, Kubernetes feature gate + with("CSIMigrationOpenStack"). // sig-storage, jsafrane, Kubernetes feature gate + with("CSIMigrationGCE"). // sig-storage, fbertina, Kubernetes feature gate + with("CSIMigrationAzureDisk"). // sig-storage, fbertina, Kubernetes feature gate + toFeatures(), LatencySensitive: newDefaultFeatures().
with( "TopologyManager", // sig-pod, sjenning @@ -125,7 +132,7 @@ var defaultFeatures = &FeatureGateEnabledDisabled{ "SupportPodPidsLimit", // sig-pod, sjenning "NodeDisruptionExclusion", // sig-scheduling, ccoleman "ServiceNodeExclusion", // sig-scheduling, ccoleman - "SCTPSupport", // sig-network, ccallend + "DownwardAPIHugePages", // sig-node, rphillips }, Disabled: []string{ "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index d5ebcc91c5..19924323e1 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -82,7 +82,10 @@ type InfrastructureStatus struct { // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. // The 'SingleReplica' mode will be used in single-node deployments // and the operators should not configure the operand for highly-available operation + // The 'External' mode indicates that the control plane is hosted externally to the cluster and that + // its components are not visible within the cluster. // +kubebuilder:default=HighlyAvailable + // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` // infrastructureTopology expresses the expectations for infrastructure services that do not run on control @@ -91,12 +94,16 @@ type InfrastructureStatus struct { // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. // The 'SingleReplica' mode will be used in single-node deployments // and the operators should not configure the operand for highly-available operation + // NOTE: External topology mode is not applicable for this field. // +kubebuilder:default=HighlyAvailable + // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica InfrastructureTopology TopologyMode `json:"infrastructureTopology"` } // TopologyMode defines the topology mode of the control/infra nodes. -// +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica +// NOTE: Enum validation is specified in each field that uses this type, +// given that External value is not applicable to the InfrastructureTopology +// field. type TopologyMode string const ( @@ -105,6 +112,12 @@ const ( // "SingleReplica" is for operators to avoid spending resources for high-availability purpose. SingleReplicaTopologyMode TopologyMode = "SingleReplica" + + // "External" indicates that the component is running externally to the cluster. When specified + // as the control plane topology, operators should avoid scheduling workloads to masters or assume + // that any of the control plane components such as kubernetes API server or etcd are visible within + // the cluster. + ExternalTopologyMode TopologyMode = "External" ) // PlatformType is a specific supported infrastructure provider. @@ -313,6 +326,34 @@ type AWSPlatformStatus struct { // There must be only one ServiceEndpoint for a service. // +optional ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` + + // resourceTags is a list of additional tags to apply to AWS resources created for the cluster. + // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. + // AWS supports a maximum of 50 tags per resource. 
OpenShift reserves 25 tags for its use, leaving 25 tags + available for the user. + +kubebuilder:validation:MaxItems=25 + +optional + ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` +} + +// AWSResourceTag is a tag to apply to AWS resources created for the cluster. +type AWSResourceTag struct { + // key is the key of the tag + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + // +required + Key string `json:"key"` + // value is the value of the tag. + // Some AWS services do not support empty values. Since tags are added to resources in many services, the + // length of the tag value must meet the requirements of all services. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + // +required + Value string `json:"value"` } // AzurePlatformSpec holds the desired state of the Azure infrastructure provider. @@ -334,10 +375,14 @@ type AzurePlatformStatus struct { // If empty, the value is equal to `AzurePublicCloud`. // +optional CloudName AzureCloudEnvironment `json:"cloudName,omitempty"` + + // armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack. + // +optional + ARMEndpoint string `json:"armEndpoint,omitempty"` } // AzureCloudEnvironment is the name of the Azure cloud environment -// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud +// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud type AzureCloudEnvironment string const ( @@ -352,6 +397,9 @@ const ( // AzureGermanCloud is the Azure cloud environment used in Germany. AzureGermanCloud AzureCloudEnvironment = "AzureGermanCloud" + + // AzureStackCloud is the Azure cloud environment used at the edge and on premises. + AzureStackCloud AzureCloudEnvironment = "AzureStackCloud" ) // GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. @@ -482,6 +530,10 @@ type IBMCloudPlatformStatus struct { // ProviderType indicates the type of cluster that was created ProviderType IBMCloudProviderType `json:"providerType,omitempty"` + + // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // the DNS zone for the cluster's base domain + CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` } // KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 4da914ba11..9451adc278 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -1,6 +1,8 @@ package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -41,13 +43,136 @@ type IngressSpec struct { // certificate. // +optional AppsDomain string `json:"appsDomain,omitempty"` + + // componentRoutes is an optional list of routes that are managed by OpenShift components + // that a cluster-admin is able to configure the hostname and serving certificate for.
+ // The namespace and name of each route in this list should match an existing entry in the + // status.componentRoutes list. + // + // To determine the set of configurable Routes, look at namespace and name of entries in the + // .status.componentRoutes list, where participating operators write the status of + // configurable routes. + // +optional + ComponentRoutes []ComponentRouteSpec `json:"componentRoutes,omitempty"` } +// ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. +// +kubebuilder:validation:Pattern="^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=512 +type ConsumingUser string + +// Hostname is an alias for hostname string validation. +// +kubebuilder:validation:Format=hostname +type Hostname string + type IngressStatus struct { + // componentRoutes is where participating operators place the current route status for routes whose + // hostnames and serving certificates can be customized by the cluster-admin. + // +optional + ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. +type ComponentRouteSpec struct { + // namespace is the namespace of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // hostname is the hostname that should be used by the route. + // +kubebuilder:validation:Required + // +required + Hostname Hostname `json:"hostname"` + + // servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. + // The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + ServingCertKeyPairSecret SecretNameReference `json:"servingCertKeyPairSecret"` +} + +// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. +type ComponentRouteStatus struct { + // namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace + // ensures that no two components will conflict and the same component can be installed multiple times. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. 
+ // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. It does not have to be the actual name of a route resource + // but it cannot be renamed. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // defaultHostname is the hostname of this route prior to customization. + // +kubebuilder:validation:Required + // +required + DefaultHostname Hostname `json:"defaultHostname"` + + // consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + // +kubebuilder:validation:MaxItems=5 + // +optional + ConsumingUsers []ConsumingUser `json:"consumingUsers,omitempty"` + + // currentHostnames is the list of current names used by the route. Typically, this list should consist of a single + // hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + // +kubebuilder:validation:MinItems=1 + // +optional + CurrentHostnames []Hostname `json:"currentHostnames,omitempty"` + // conditions are used to communicate the state of the componentRoutes entry. + // + // Supported conditions include Available, Degraded and Progressing. + // + // If available is true, the content served by the route can be accessed by users. This includes cases + // where a default may continue to serve content while the customized route specified by the cluster-admin + // is being configured. + // + // If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. + // The currentHostnames field may or may not be in effect. + // + // If Progressing is true, that means the component is taking some action related to the componentRoutes entry. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Required + // +required + RelatedObjects []ObjectReference `json:"relatedObjects"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type IngressList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 257b54b08d..ebfdf01626 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -76,6 +76,9 @@ type NetworkStatus struct { // ClusterNetworkMTU is the MTU for inter-pod networking. ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` + + // Migration contains the cluster network migration configuration. 
+ Migration *NetworkMigration `json:"migration,omitempty"` } // ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs @@ -131,3 +134,11 @@ type NetworkList struct { Items []Network `json:"items"` } + +// NetworkMigration represents the cluster network configuration. +type NetworkMigration struct { + // NetworkType is the target plugin that is to be deployed. + // Currently supported values are: OpenShiftSDN, OVNKubernetes + // +kubebuilder:validation:Enum={"OpenShiftSDN","OVNKubernetes"} + NetworkType string `json:"networkType"` +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 16cdc034c5..7b6debd718 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { *out = new(TLSSecurityProfile) (*in).DeepCopyInto(*out) } - out.Audit = in.Audit + in.Audit.DeepCopyInto(&out.Audit) return } @@ -207,6 +207,11 @@ func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { *out = make([]AWSServiceEndpoint, len(*in)) copy(*out, *in) } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AWSResourceTag, len(*in)) + copy(*out, *in) + } return } @@ -220,6 +225,22 @@ func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. +func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { + if in == nil { + return nil + } + out := new(AWSResourceTag) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) { *out = *in @@ -289,6 +310,11 @@ func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Audit) DeepCopyInto(out *Audit) { *out = *in + if in.CustomRules != nil { + in, out := &in.CustomRules, &out.CustomRules + *out = make([]AuditCustomRule, len(*in)) + copy(*out, *in) + } return } @@ -319,6 +345,22 @@ func (in *AuditConfig) DeepCopy() *AuditConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditCustomRule) DeepCopyInto(out *AuditCustomRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditCustomRule. +func (in *AuditCustomRule) DeepCopy() *AuditCustomRule { + if in == nil { + return nil + } + out := new(AuditCustomRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Authentication) DeepCopyInto(out *Authentication) { *out = *in @@ -980,6 +1022,61 @@ func (in *ComponentOverride) DeepCopy() *ComponentOverride { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
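Serialized, the new migration field sits under the Network config's status; a sketch with illustrative values (status is written by the cluster, not by users):

apiVersion: config.openshift.io/v1
kind: Network
metadata:
  name: cluster
status:
  networkType: OpenShiftSDN      # plugin currently deployed
  migration:
    networkType: OVNKubernetes   # target plugin; must be OpenShiftSDN or OVNKubernetes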
+func (in *ComponentRouteSpec) DeepCopyInto(out *ComponentRouteSpec) { + *out = *in + out.ServingCertKeyPairSecret = in.ServingCertKeyPairSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteSpec. +func (in *ComponentRouteSpec) DeepCopy() *ComponentRouteSpec { + if in == nil { + return nil + } + out := new(ComponentRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentRouteStatus) DeepCopyInto(out *ComponentRouteStatus) { + *out = *in + if in.ConsumingUsers != nil { + in, out := &in.ConsumingUsers, &out.ConsumingUsers + *out = make([]ConsumingUser, len(*in)) + copy(*out, *in) + } + if in.CurrentHostnames != nil { + in, out := &in.CurrentHostnames, &out.CurrentHostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteStatus. +func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { + if in == nil { + return nil + } + out := new(ComponentRouteStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { *out = *in @@ -2153,8 +2250,8 @@ func (in *Ingress) DeepCopyInto(out *Ingress) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } @@ -2212,6 +2309,11 @@ func (in *IngressList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteSpec, len(*in)) + copy(*out, *in) + } return } @@ -2228,6 +2330,13 @@ func (in *IngressSpec) DeepCopy() *IngressSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2496,6 +2605,22 @@ func (in *NetworkList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. 
+func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { *out = *in @@ -2540,6 +2665,11 @@ func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + **out = **in + } return } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index b038e8ec40..c086c5a986 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -284,7 +284,7 @@ var map_APIServerSpec = map[string]string{ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old and Intermediate profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.", "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", } @@ -293,13 +293,24 @@ func (APIServerSpec) SwaggerDoc() map[string]string { } var map_Audit = map[string]string{ - "profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). 
- AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list).\n\nIf unset, the 'Default' profile is used as the default.", + "profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: to raise a Red Hat support request, it is required to set this to Default, WriteRequestBodies, or AllRequestBodies to generate audit log events that can be analyzed by support.\n\nIf unset, the 'Default' profile is used as the default.", + "customRules": "customRules specify profiles per group. These profiles take precedence over the top-level profile field if they apply. They are evaluated from top to bottom and the first one that matches applies.", } func (Audit) SwaggerDoc() map[string]string { return map_Audit } +var map_AuditCustomRule = map[string]string{ + "": "AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile.", + "group": "group is the name of a group a request user must be a member of in order for this profile to apply.", + "profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nIf unset, the 'Default' profile is used as the default.", +} + +func (AuditCustomRule) SwaggerDoc() map[string]string { + return map_AuditCustomRule +} + var map_Authentication = map[string]string{ "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.", "spec": "spec holds user settable values for configuration", @@ -725,12 +736,23 @@ var map_AWSPlatformStatus = map[string]string{ "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", "region": "region holds the default AWS region for new AWS resources created by the cluster.", "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", + "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster.
See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", } func (AWSPlatformStatus) SwaggerDoc() map[string]string { return map_AWSPlatformStatus } +var map_AWSResourceTag = map[string]string{ + "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", + "key": "key is the key of the tag", + "value": "value is the value of the tag. Some AWS services do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", +} + +func (AWSResourceTag) SwaggerDoc() map[string]string { + return map_AWSResourceTag +} + var map_AWSServiceEndpoint = map[string]string{ "": "AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services.", "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty.", @@ -754,6 +776,7 @@ var map_AzurePlatformStatus = map[string]string{ "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.", "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", + "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { @@ -828,6 +851,7 @@ var map_IBMCloudPlatformStatus = map[string]string{ "location": "Location is where the cluster has been deployed", "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", "providerType": "ProviderType indicates the type of cluster that was created", + "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", } func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { @@ -870,8 +894,8 @@ var map_InfrastructureStatus = map[string]string{ "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443).
var map_AWSServiceEndpoint = map[string]string{ "": "AWSServiceEndpoint stores the configuration of a custom url to override existing defaults of AWS Services.", "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty.", @@ -754,6 +776,7 @@ var map_AzurePlatformStatus = map[string]string{ "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.", "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", + "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { @@ -828,6 +851,7 @@ var map_IBMCloudPlatformStatus = map[string]string{ "location": "Location is where the cluster has been deployed", "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", "providerType": "ProviderType indicates the type of cluster that was created", + "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", } func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { @@ -870,8 +894,8 @@ var map_InfrastructureStatus = map[string]string{ "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", - "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation", - "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation", + "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", + "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. NOTE: External topology mode is not applicable for this field.", } func (InfrastructureStatus) SwaggerDoc() map[string]string { @@ -992,6 +1016,33 @@ func (VSpherePlatformStatus) SwaggerDoc() map[string]string { return map_VSpherePlatformStatus }
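The two topology fields documented above are plain enums; for instance, a single-node deployment would report something like the following (illustrative status fragment):

```yaml
# Illustrative Infrastructure status fragment for a single-node deployment.
status:
  controlPlaneTopology: SingleReplica      # operands on control nodes: one replica
  infrastructureTopology: SingleReplica    # infrastructure services: one replica
```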
+var map_ComponentRouteSpec = map[string]string{ + "": "ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "hostname": "hostname is the hostname that should be used by the route.", + "servingCertKeyPairSecret": "servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", +} + +func (ComponentRouteSpec) SwaggerDoc() map[string]string { + return map_ComponentRouteSpec +} + +var map_ComponentRouteStatus = map[string]string{ + "": "ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "defaultHostname": "defaultHostname is the hostname of this route prior to customization.", + "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.", + "currentHostnames": "currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.", + "conditions": "conditions are used to communicate the state of the componentRoutes entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured.\n\nIf Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect.\n\nIf Progressing is true, that means the component is taking some action related to the componentRoutes entry.", + "relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.", +} + +func (ComponentRouteStatus) SwaggerDoc() map[string]string { + return map_ComponentRouteStatus +} +
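These two types feed the componentRoutes lists on the Ingress config resource documented just below; a minimal sketch of customizing one route (the console route is a plausible participant, and the hostname and secret name are made up):

```yaml
# Illustrative Ingress config customizing one component route.
apiVersion: config.openshift.io/v1
kind: Ingress
metadata:
  name: cluster
spec:
  componentRoutes:
  - namespace: openshift-console        # must match a status.componentRoutes entry
    name: console
    hostname: console.custom.example.com
    servingCertKeyPairSecret:
      name: console-custom-tls          # kubernetes.io/tls secret in openshift-config
```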
var map_Ingress = map[string]string{ "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.", "spec": "spec holds user settable values for configuration", @@ -1003,14 +1054,23 @@ func (Ingress) SwaggerDoc() map[string]string { } var map_IngressSpec = map[string]string{ - "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"..\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.\".\n\nOnce set, changing domain is not currently supported.", - "appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been setup with a wildcard certificate.", + "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"..\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.\".\n\nOnce set, changing domain is not currently supported.", + "appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been set up with a wildcard certificate.", + "componentRoutes": "componentRoutes is an optional list of routes that are managed by OpenShift components and whose hostname and serving certificate a cluster-admin is able to configure. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list.\n\nTo determine the set of configurable Routes, look at the namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes.", } func (IngressSpec) SwaggerDoc() map[string]string { return map_IngressSpec } +var map_IngressStatus = map[string]string{ + "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.", "cidr": "The complete block for pod IPs.", @@ -1051,6 +1111,15 @@ func (Network) SwaggerDoc() map[string]string { return map_Network } +var map_NetworkMigration = map[string]string{ + "": "NetworkMigration represents the cluster network migration configuration.", + "networkType": "NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes", +} + +func (NetworkMigration) SwaggerDoc() map[string]string { + return map_NetworkMigration +} +
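NetworkMigration surfaces through the migration field on NetworkStatus (documented just below); an illustrative status fragment during a plugin migration:

```yaml
# Illustrative Network status fragment mid-migration.
status:
  networkType: OpenShiftSDN      # plugin currently deployed
  migration:
    networkType: OVNKubernetes   # target plugin being migrated to
```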
OpenShiftSDN).", "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", + "migration": "Migration contains the cluster network migration configuration.", } func (NetworkStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml new file mode 100644 index 0000000000..0284f68330 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml @@ -0,0 +1,136 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/612 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: configs.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: Config + plural: configs + singular: config + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Config provides information to configure the config operator. It handles installation, migration or synchronization of cloud based cluster configurations like AWS or Azure. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Config Operator. + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status defines the observed status of the Config Operator. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml new file mode 100644 index 0000000000..10f7e21687 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml @@ -0,0 +1,186 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/752 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: etcds.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: Etcd + plural: etcds + singular: etcd + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Etcd provides information to configure an operator to manage etcd. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + type: integer + format: int32 + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + type: integer + format: int32 + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. 
+ type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most recent deployment + type: integer + format: int32 + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describes the detailed reason for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across individual nodes + type: array + items: + description: NodeStatus provides information about the current state of a particular node managed by this operator. + type: object + properties: + currentRevision: + description: currentRevision is the generation of the most recently successful deployment + type: integer + format: int32 + lastFailedCount: + description: lastFailedCount is how often the last failed revision failed. + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment we tried and failed to deploy. + type: integer + format: int32 + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors during the failed deployment referenced in lastFailedRevision + type: array + items: + type: string + lastFailedTime: + description: lastFailedTime is the time the last failed revision last failed. + type: string + format: date-time + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment we're trying to apply + type: integer + format: int32 + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {}
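The Etcd CRD above follows the static-pod-operator pattern; for example, an admin can force a new operand revision by changing forceRedeploymentReason (the reason string is arbitrary but must be unique per roll):

```yaml
# Illustrative Etcd resource forcing a redeployment of the operand.
apiVersion: operator.openshift.io/v1
kind: Etcd
metadata:
  name: cluster
spec:
  managementState: Managed
  forceRedeploymentReason: roll-2021-03-01   # any new unique string triggers a new revision
  failedRevisionLimit: 5                     # keep at most five failed installer revisions
```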
diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml new file mode 100644 index 0000000000..63ba8cdded --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml @@ -0,0 +1,233 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: kubeapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: KubeAPIServer + plural: kubeapiservers + singular: kubeapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: KubeAPIServer provides information to configure an operator to + manage kube-apiserver. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes API Server + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + API Server + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describes the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the last failed revision + failed. + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision + last failed. 
+ format: date-time + type: string + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch new file mode 100644 index 0000000000..8145f00c49 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch @@ -0,0 +1,3 @@ +- op: replace + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern + value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml new file mode 100644 index 0000000000..94b2820bf8 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml @@ -0,0 +1,235 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: kubecontrollermanagers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeControllerManager + plural: kubecontrollermanagers + singular: kubecontrollermanager + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: KubeControllerManager provides information to configure an operator + to manage kube-controller-manager. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes Controller Manager + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + Controller Manager + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. 
+ items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describes the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the last failed revision + failed. + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision + last failed. 
+ format: date-time + type: string + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch new file mode 100644 index 0000000000..8145f00c49 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch @@ -0,0 +1,3 @@ +- op: replace + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern + value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml new file mode 100644 index 0000000000..ea2329342b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml @@ -0,0 +1,235 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: kubeschedulers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeScheduler + plural: kubeschedulers + singular: kubescheduler + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: KubeScheduler provides information to configure an operator to + manage scheduler. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes Scheduler + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + Scheduler + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. 
+ items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describes the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the last failed revision + failed. + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision + last failed. 
+ format: date-time + type: string + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch new file mode 100644 index 0000000000..8145f00c49 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch @@ -0,0 +1,3 @@ +- op: replace + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern + value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml new file mode 100644 index 0000000000..5c88fb5fda --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml @@ -0,0 +1,141 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: openshiftapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: OpenShiftAPIServer + plural: openshiftapiservers + singular: openshiftapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the OpenShift API Server. + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status defines the observed status of the OpenShift API Server. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + latestAvailableRevision: + description: latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. 
+ type: integer + format: int32 + minimum: 0 + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml new file mode 100644 index 0000000000..8fb4d4471e --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml @@ -0,0 +1,142 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/692 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: cloudcredentials.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: CloudCredential + listKind: CloudCredentialList + plural: cloudcredentials + singular: cloudcredential + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CloudCredential provides a means to configure an operator to manage CredentialsRequests. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. + type: object + properties: + credentialsMode: + description: 'CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into "manual" mode if desired. Leaving the field in default mode runs CCO so that the cluster''s cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes: AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual" Others: Do not set value as other platforms only support running in "Passthrough"' + type: string + enum: + - "" + - Manual + - Mint + - Passthrough + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
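Pausing the generic operator fields for a moment: credentialsMode, documented above, is the field unique to this CRD; a minimal sketch pinning CCO to manual mode:

```yaml
# Minimal sketch putting the cloud-credential-operator into Manual mode.
apiVersion: operator.openshift.io/v1
kind: CloudCredential
metadata:
  name: cluster
spec:
  managementState: Managed
  credentialsMode: Manual   # skip dynamic probing of root credential capabilities
```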
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: CloudCredentialStatus defines the observed status of the cloud-credential-operator. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
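# --- Illustration only, not part of the vendored diff: a CloudCredential
# resource exercising the spec defined above. The object is cluster scoped;
# naming the singleton "cluster" follows the usual OpenShift operator-config
# convention and is an assumption here, not something this diff states.
apiVersion: operator.openshift.io/v1
kind: CloudCredential
metadata:
  name: cluster
spec:
  credentialsMode: Manual   # enum above: "", Manual, Mint, Passthrough
  logLevel: Debug           # operand logging; defaults to Normal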
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml new file mode 100644 index 0000000000..81a74093d7 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml @@ -0,0 +1,133 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/503 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: kubestorageversionmigrators.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: KubeStorageVersionMigrator + listKind: KubeStorageVersionMigratorList + plural: kubestorageversionmigrators + singular: kubestorageversionmigrator + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
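# --- Illustration only, not part of the vendored diff: a minimal
# KubeStorageVersionMigrator object showing how unsupportedConfigOverrides
# layers on top of the rest of the config. Per the description above, values
# are overlaid in order (1. hardcoded defaults, 2. observedConfig,
# 3. unsupportedConfigOverrides), so keys set here win. The "migration" knob
# below is hypothetical, chosen only to show the shape of a sparse override.
apiVersion: operator.openshift.io/v1
kind: KubeStorageVersionMigrator
metadata:
  name: cluster
spec:
  managementState: Managed
  unsupportedConfigOverrides:
    migration:
      concurrency: 2        # hypothetical key; beats any observedConfig value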
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml new file mode 100644 index 0000000000..a056dd6c72 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml @@ -0,0 +1,140 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: authentications.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Authentication + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Authentication provides information to configure an operator to manage authentication. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + oauthAPIServer: + description: OAuthAPIServer holds status specific only to oauth-apiserver + type: object + properties: + latestAvailableRevision: + description: LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. 
+ type: integer + format: int32 + minimum: 0 + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml new file mode 100644 index 0000000000..2768201cf2 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml @@ -0,0 +1,134 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: openshiftcontrollermanagers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: OpenShiftControllerManager + plural: openshiftcontrollermanagers + singular: openshiftcontrollermanager + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml new file mode 100644 index 0000000000..a265bc2c84 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml @@ -0,0 +1,134 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/670 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: storages.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Storage + plural: storages + singular: storage + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Storage provides a means to configure an operator to manage 
the cluster storage operator. `cluster` is the canonical name. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
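# --- Illustration only, not part of the vendored diff: the schema above says
# "cluster" is the canonical name for the Storage resource, so a minimal object
# looks like this. operatorLogLevel tunes the operator's own logs independently
# of the operand-facing logLevel.
apiVersion: operator.openshift.io/v1
kind: Storage
metadata:
  name: cluster
spec:
  managementState: Managed
  logLevel: Normal          # operand logging
  operatorLogLevel: Trace   # operator's own logging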
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml new file mode 100644 index 0000000000..a34f345249 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml @@ -0,0 +1,1288 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/616 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: ingresscontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: IngressController + listKind: IngressControllerList + plural: ingresscontrollers + singular: ingresscontroller + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "IngressController describes a managed ingress controller for + the cluster. The controller can service OpenShift Route and Kubernetes Ingress + resources. \n When an IngressController is created, a new ingress controller + deployment is created to allow external traffic to reach the services that + expose Ingress or Route resources. Updating this resource may lead to disruption + for public facing network connections as a new ingress controller revision + may be rolled out. \n https://kubernetes.io/docs/concepts/services-networking/ingress-controllers + \n Whenever possible, sensible defaults for the platform are used. See each + field for more details." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + IngressController. + properties: + defaultCertificate: + description: "defaultCertificate is a reference to a secret containing + the default certificate served by the ingress controller. When Routes + don't specify their own certificate, defaultCertificate is used. + \n The secret must contain the following keys and data: \n tls.crt: + certificate file contents tls.key: key file contents \n If unset, + a wildcard certificate is automatically generated and used. The + certificate is valid for the ingress controller domain (and subdomains) + and the generated certificate's CA will be automatically integrated + with the cluster's trust store. \n If a wildcard certificate is + used and shared by multiple HTTP/2 enabled routes (which implies + ALPN) then clients (i.e., notably browsers) are at liberty to reuse + open connections. This means a client can reuse a connection to + another route and that is likely to fail. This behaviour is generally + known as connection coalescing. \n The in-use certificate (whether + generated or user-specified) will be automatically integrated with + OpenShift's built-in OAuth server." + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + domain: + description: "domain is a DNS name serviced by the ingress controller + and is used to configure multiple features: \n * For the LoadBalancerService + endpoint publishing strategy, domain is used to configure DNS + records. See endpointPublishingStrategy. \n * When using a generated + default certificate, the certificate will be valid for domain + and its subdomains. See defaultCertificate. \n * The value is published + to individual Route statuses so that end-users know where to target + external DNS records. \n domain must be unique among all IngressControllers, + and cannot be updated. \n If empty, defaults to ingress.config.openshift.io/cluster + .spec.domain." + type: string + endpointPublishingStrategy: + description: "endpointPublishingStrategy is used to publish the ingress + controller endpoints to other networks, enable load balancer integrations, + etc. \n If unset, the default is based on infrastructure.config.openshift.io/cluster + .status.platform: \n AWS: LoadBalancerService (with External + scope) Azure: LoadBalancerService (with External scope) GCP: + \ LoadBalancerService (with External scope) IBMCloud: LoadBalancerService + (with External scope) Libvirt: HostNetwork \n Any other platform + types (including None) default to HostNetwork. \n endpointPublishingStrategy + cannot be updated." + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork + endpoint publishing strategy. Present only if type is HostNetwork. + properties: + protocol: + description: "protocol specifies whether the IngressController + expects incoming connections to use plain TCP or whether + the IngressController expects PROXY protocol. \n PROXY protocol + can be used with load balancers that support it to communicate + the source addresses of client connections when forwarding + those connections to the IngressController. 
Using PROXY + protocol enables the IngressController to report those source + addresses instead of reporting the load balancer's address + in HTTP headers and logs. Note that enabling PROXY protocol + on the IngressController will cause connections to fail + if you are not using a load balancer that uses PROXY protocol + to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt + for information about PROXY protocol. \n The following values + are valid for this field: \n * The empty string. * \"TCP\". + * \"PROXY\". \n The empty string specifies the default, + which is TCP without PROXY protocol. Note that the default + is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. + Present only if type is LoadBalancerService. + properties: + providerParameters: + description: "providerParameters holds desired load balancer + information specific to the underlying infrastructure provider. + \n If empty, defaults will be applied. See specific providerParameters + fields for details about their defaults." + properties: + aws: + description: "aws provides configuration settings that + are specific to AWS load balancers. \n If empty, defaults + will be applied. See specific aws fields for details + about their defaults." + properties: + classicLoadBalancer: + description: classicLoadBalancerParameters holds configuration + parameters for an AWS classic load balancer. Present + only if type is Classic. + type: object + networkLoadBalancer: + description: networkLoadBalancerParameters holds configuration + parameters for an AWS network load balancer. Present + only if type is NLB. + type: object + type: + description: "type is the type of AWS load balancer + to instantiate for an ingresscontroller. \n Valid + values are: \n * \"Classic\": A Classic Load Balancer + that makes routing decisions at either the transport + layer (TCP/SSL) or the application layer (HTTP/HTTPS). + See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + \n * \"NLB\": A Network Load Balancer that makes + routing decisions at the transport layer (TCP/SSL). + See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - Classic + - NLB + type: string + required: + - type + type: object + gcp: + description: "gcp provides configuration settings that + are specific to GCP load balancers. \n If empty, defaults + will be applied. See specific gcp fields for details + about their defaults." + properties: + clientAccess: + description: "clientAccess describes how client access + is restricted for internal load balancers. \n Valid + values are: * \"Global\": Specifying an internal + load balancer with Global client access allows + clients from any region within the VPC to communicate + with the load balancer. \n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + \n * \"Local\": Specifying an internal load balancer + with Local client access means only clients within + the same region (and VPC) as the GCP load balancer + \ can communicate with the load balancer. Note + that this is the default behavior. 
\n https://cloud.google.com/load-balancing/docs/internal#client_access" + enum: + - Global + - Local + type: string + type: object + type: + description: type is the underlying infrastructure provider + for the load balancer. Allowed values are "AWS", "Azure", + "BareMetal", "GCP", "OpenStack", and "VSphere". + enum: + - AWS + - Azure + - BareMetal + - GCP + - OpenStack + - VSphere + - IBM + type: string + required: + - type + type: object + scope: + description: scope indicates the scope at which the load balancer + is exposed. Possible values are "External" and "Internal". + enum: + - Internal + - External + type: string + required: + - scope + type: object + nodePort: + description: nodePort holds parameters for the NodePortService + endpoint publishing strategy. Present only if type is NodePortService. + properties: + protocol: + description: "protocol specifies whether the IngressController + expects incoming connections to use plain TCP or whether + the IngressController expects PROXY protocol. \n PROXY protocol + can be used with load balancers that support it to communicate + the source addresses of client connections when forwarding + those connections to the IngressController. Using PROXY + protocol enables the IngressController to report those source + addresses instead of reporting the load balancer's address + in HTTP headers and logs. Note that enabling PROXY protocol + on the IngressController will cause connections to fail + if you are not using a load balancer that uses PROXY protocol + to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt + for information about PROXY protocol. \n The following values + are valid for this field: \n * The empty string. * \"TCP\". + * \"PROXY\". \n The empty string specifies the default, + which is TCP without PROXY protocol. Note that the default + is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + private: + description: private holds parameters for the Private endpoint + publishing strategy. Present only if type is Private. + type: object + type: + description: "type is the publishing strategy to use. Valid values + are: \n * LoadBalancerService \n Publishes the ingress controller + using a Kubernetes LoadBalancer Service. \n In this configuration, + the ingress controller deployment uses container networking. + A LoadBalancer Service is created to publish the deployment. + \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + \n If domain is set, a wildcard DNS record will be managed to + point at the LoadBalancer Service's external name. DNS records + are managed only in DNS zones defined by dns.config.openshift.io/cluster + .spec.publicZone and .spec.privateZone. \n Wildcard DNS management + is currently supported only on the AWS, Azure, and GCP platforms. + \n * HostNetwork \n Publishes the ingress controller on node + ports where the ingress controller is deployed. \n In this configuration, + the ingress controller deployment uses host networking, bound + to node ports 80 and 443. The user is responsible for configuring + an external load balancer to publish the ingress controller + via the node ports. \n * Private \n Does not publish the ingress + controller. \n In this configuration, the ingress controller + deployment uses container networking, and is not explicitly + published. The user must manually publish the ingress controller. 
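# --- Illustration only, not part of the vendored diff: a sharded
# IngressController publishing through an internal AWS NLB, per the
# endpointPublishingStrategy schema above. The name and domain are placeholders.
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: internal-apps                 # hypothetical shard name
  namespace: openshift-ingress-operator
spec:
  domain: internal.apps.example.com   # placeholder; must be unique across IngressControllers
  endpointPublishingStrategy:
    type: LoadBalancerService
    loadBalancer:
      scope: Internal                 # required: "Internal" or "External"
      providerParameters:
        type: AWS
        aws:
          type: NLB                   # required when aws is set: "Classic" or "NLB"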
+ \n * NodePortService \n Publishes the ingress controller using + a Kubernetes NodePort Service. \n In this configuration, the + ingress controller deployment uses container networking. A NodePort + Service is created to publish the deployment. The specific node + ports are dynamically allocated by OpenShift; however, to support + static port allocations, user changes to the node port field + of the managed NodePort Service will be preserved. + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + type: string + required: + - type + type: object + httpErrorCodePages: + description: httpErrorCodePages specifies a configmap with custom + error pages. The administrator must create this configmap in the + openshift-config namespace. This configmap should have keys in the + format "error-page-<error code>.http", where <error code> is an + HTTP error code. For example, "error-page-503.http" defines an error + page for HTTP 503 responses. Currently only error pages for 503 + and 404 responses can be customized. Each value in the configmap + should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http + If this field is empty, the ingress controller uses the default + error pages. + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + required: + - name + type: object + httpHeaders: + description: "httpHeaders defines policy for HTTP headers. \n If this + field is empty, the default values are used." + properties: + forwardedHeaderPolicy: + description: "forwardedHeaderPolicy specifies when and how the + IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, + X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version + HTTP headers. The value may be one of the following: \n * \"Append\", + which specifies that the IngressController appends the headers, + preserving existing headers. \n * \"Replace\", which specifies + that the IngressController sets the headers, replacing any + existing Forwarded or X-Forwarded-* headers. \n * \"IfNone\", + which specifies that the IngressController sets the headers + if they are not already set. \n * \"Never\", which specifies + that the IngressController never sets the headers, preserving + any existing headers. \n By default, the policy is \"Append\"." + enum: + - Append + - Replace + - IfNone + - Never + type: string + headerNameCaseAdjustments: + description: "headerNameCaseAdjustments specifies case adjustments + that can be applied to HTTP header names. Each adjustment is + specified as an HTTP header name with the desired capitalization. + \ For example, specifying \"X-Forwarded-For\" indicates that + the \"x-forwarded-for\" HTTP header should be adjusted to have + the specified capitalization. \n These adjustments are only + applied to cleartext, edge-terminated, and re-encrypt routes, + and only when using HTTP/1. \n For request headers, these adjustments + are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true + annotation. For response headers, these adjustments are applied + to all HTTP responses. \n If this field is empty, no request + headers are adjusted." + items: + description: IngressControllerHTTPHeaderNameCaseAdjustment is + the name of an HTTP header (for example, "X-Forwarded-For") + in the desired capitalization. The value must be a valid + HTTP header name as defined in RFC 2616 section 4.2.
+ maxLength: 1024 + minLength: 0 + pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + nullable: true + type: array + uniqueId: + description: "uniqueId describes configuration for a custom HTTP + header that the ingress controller should inject into incoming + HTTP requests. Typically, this header is configured to have + a value that is unique to the HTTP request. The header can + be used by applications or included in access logs to facilitate + tracing individual HTTP requests. \n If this field is empty, + no such header is injected into requests." + properties: + format: + description: 'format specifies the format for the injected + HTTP header''s value. This field has no effect unless name + is specified. For the HAProxy-based ingress controller + implementation, this format uses the same syntax as the + HTTP log format. If the field is empty, the default value + is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the corresponding + HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3' + maxLength: 1024 + minLength: 0 + pattern: ^(%(%|(\{[-+]?[QXE](,[-+]?[QXE])*\})?([A-Za-z]+|\[[.0-9A-Z_a-z]+(\([^)]+\))?(,[.0-9A-Z_a-z]+(\([^)]+\))?)*\]))|[^%[:cntrl:]])*$ + type: string + name: + description: name specifies the name of the HTTP header (for + example, "unique-id") that the ingress controller should + inject into HTTP requests. The field's value must be a + valid HTTP header name as defined in RFC 2616 section 4.2. If + the field is empty, no header is injected. + maxLength: 1024 + minLength: 0 + pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + type: object + type: object + logging: + description: logging defines parameters for what should be logged + where. If this field is empty, operational logs are enabled but + access logs are disabled. + properties: + access: + description: "access describes how the client requests should + be logged. \n If this field is empty, access logging is disabled." + properties: + destination: + description: destination is where access logs go. + properties: + container: + description: container holds parameters for the Container + logging destination. Present only if type is Container. + type: object + syslog: + description: syslog holds parameters for a syslog endpoint. Present + only if type is Syslog. + oneOf: + - properties: + address: + format: ipv4 + - properties: + address: + format: ipv6 + properties: + address: + description: address is the IP address of the syslog + endpoint that receives log messages. + type: string + facility: + description: "facility specifies the syslog facility + of log messages. \n If this field is empty, the + facility is \"local1\"." + enum: + - kern + - user + - mail + - daemon + - auth + - syslog + - lpr + - news + - uucp + - cron + - auth2 + - ftp + - ntp + - audit + - alert + - cron2 + - local0 + - local1 + - local2 + - local3 + - local4 + - local5 + - local6 + - local7 + type: string + port: + description: port is the UDP port number of the syslog + endpoint that receives log messages. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + type: + description: "type is the type of destination for logs. + \ It must be one of the following: \n * Container \n + The ingress operator configures the sidecar container + named \"logs\" on the ingress controller pod and configures + the ingress controller to write logs to the sidecar. + \ The logs are then available as container logs. 
The + expectation is that the administrator configures a custom + logging solution that reads logs from this sidecar. + \ Note that using container logs means that logs may + be dropped if the rate of logs exceeds the container + runtime's or the custom logging solution's capacity. + \n * Syslog \n Logs are sent to a syslog endpoint. The + administrator must specify an endpoint that can receive + syslog messages. The expectation is that the administrator + has configured a custom syslog instance." + enum: + - Container + - Syslog + type: string + required: + - type + type: object + httpCaptureCookies: + description: httpCaptureCookies specifies HTTP cookies that + should be captured in access logs. If this field is empty, + no cookies are captured. + items: + description: IngressControllerCaptureHTTPCookie describes + an HTTP cookie that should be captured. + properties: + matchType: + description: matchType specifies the type of match to + be performed on the cookie name. Allowed values are + "Exact" for an exact string match and "Prefix" for + a string prefix match. If "Exact" is specified, a + name must be specified in the name field. If "Prefix" + is provided, a prefix must be specified in the namePrefix + field. For example, specifying matchType "Prefix" + and namePrefix "foo" will capture a cookie named "foo" + or "foobar" but not one named "bar". The first matching + cookie is captured. + enum: + - Exact + - Prefix + type: string + maxLength: + description: maxLength specifies a maximum length of + the string that will be logged, which includes the + cookie name, cookie value, and one-character delimiter. If + the log entry exceeds this length, the value will + be truncated in the log message. Note that the ingress + controller may impose a separate bound on the total + length of HTTP headers in a request. + maximum: 1024 + minimum: 1 + type: integer + name: + description: name specifies a cookie name. Its value + must be a valid HTTP cookie name as defined in RFC + 6265 section 4.1. + maxLength: 1024 + minLength: 0 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ + type: string + namePrefix: + description: namePrefix specifies a cookie name prefix. Its + value must be a valid HTTP cookie name as defined + in RFC 6265 section 4.1. + maxLength: 1024 + minLength: 0 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ + type: string + required: + - matchType + - maxLength + type: object + maxItems: 1 + nullable: true + type: array + httpCaptureHeaders: + description: "httpCaptureHeaders defines HTTP headers that + should be captured in access logs. If this field is empty, + no headers are captured. \n Note that this option only applies + to cleartext HTTP connections and to secure HTTP connections + for which the ingress controller terminates encryption (that + is, edge-terminated or reencrypt connections). Headers + cannot be captured for TLS passthrough connections." + properties: + request: + description: "request specifies which HTTP request headers + to capture. \n If this field is empty, no request headers + are captured." + items: + description: IngressControllerCaptureHTTPHeader describes + an HTTP header that should be captured. + properties: + maxLength: + description: maxLength specifies a maximum length + for the header value. If a header value exceeds + this length, the value will be truncated in the + log message. Note that the ingress controller + may impose a separate bound on the total length + of HTTP headers in a request. 
+ minimum: 1 + type: integer + name: + description: name specifies a header name. Its + value must be a valid HTTP header name as defined + in RFC 2616 section 4.2. + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + required: + - maxLength + - name + type: object + nullable: true + type: array + response: + description: "response specifies which HTTP response headers + to capture. \n If this field is empty, no response headers + are captured." + items: + description: IngressControllerCaptureHTTPHeader describes + an HTTP header that should be captured. + properties: + maxLength: + description: maxLength specifies a maximum length + for the header value. If a header value exceeds + this length, the value will be truncated in the + log message. Note that the ingress controller + may impose a separate bound on the total length + of HTTP headers in a request. + minimum: 1 + type: integer + name: + description: name specifies a header name. Its + value must be a valid HTTP header name as defined + in RFC 2616 section 4.2. + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + required: + - maxLength + - name + type: object + nullable: true + type: array + type: object + httpLogFormat: + description: "httpLogFormat specifies the format of the log + message for an HTTP request. \n If this field is empty, + log messages use the implementation's default HTTP log format. + \ For HAProxy's default HTTP log format, see the HAProxy + documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + \n Note that this format only applies to cleartext HTTP + connections and to secure HTTP connections for which the + ingress controller terminates encryption (that is, edge-terminated + or reencrypt connections). It does not affect the log format + for TLS passthrough connections." + type: string + required: + - destination + type: object + type: object + namespaceSelector: + description: "namespaceSelector is used to filter the set of namespaces + serviced by the ingress controller. This is useful for implementing + shards. \n If unset, the default is no filtering." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + nodePlacement: + description: "nodePlacement enables explicit control over the scheduling + of the ingress controller. \n If unset, defaults are used. 
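# --- Illustration only, not part of the vendored diff: an IngressController
# exercising the httpErrorCodePages, httpHeaders, and logging fields whose
# schemas close just above. The configmap name and captured header are
# hypothetical; everything else follows the enums and required fields in the
# schema (destination.type is required, capture items need name + maxLength).
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default
  namespace: openshift-ingress-operator
spec:
  httpErrorCodePages:
    name: custom-error-pages        # hypothetical configmap in openshift-config
  httpHeaders:
    forwardedHeaderPolicy: IfNone   # set X-Forwarded-* headers only when absent
    headerNameCaseAdjustments:
      - X-Forwarded-For             # force this capitalization on HTTP/1 routes
    uniqueId:
      name: unique-id               # inject a per-request ID header
  logging:
    access:
      destination:
        type: Container             # expose access logs via the "logs" sidecar
      httpCaptureHeaders:
        request:
          - name: Host              # capture the Host request header
            maxLength: 128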
See NodePlacement + for more details." + properties: + nodeSelector: + description: "nodeSelector is the node selector applied to ingress + controller deployments. \n If unset, the default is: \n kubernetes.io/os: + linux node-role.kubernetes.io/worker: '' \n If set, the specified + selector is used and replaces the default." + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + tolerations: + description: "tolerations is a list of tolerations applied to + ingress controller deployments. \n The default is an empty list. + \n See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/" + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using + the matching operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to + the value. Valid operators are Exists and Equal. Defaults + to Equal. Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints of a particular + category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the taint + forever (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + type: object + replicas: + description: replicas is the desired number of ingress controller + replicas. If unset, defaults to 2.
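# --- Illustration only, not part of the vendored diff: replicas plus the
# nodePlacement selector/toleration shapes defined above. The infra label and
# taint are hypothetical stand-ins for whatever the cluster actually uses.
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default
  namespace: openshift-ingress-operator
spec:
  replicas: 3                               # overrides the default of 2
  nodePlacement:
    nodeSelector:
      matchLabels:
        node-role.kubernetes.io/infra: ""   # hypothetical: pin pods to infra nodes
    tolerations:
      - key: node-role.kubernetes.io/infra  # hypothetical matching taint
        operator: Exists
        effect: NoSchedule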
+ format: int32 + type: integer + routeAdmission: + description: "routeAdmission defines a policy for handling new route + claims (for example, to allow or deny claims across namespaces). + \n If empty, defaults will be applied. See specific routeAdmission + fields for details about their defaults." + properties: + namespaceOwnership: + description: "namespaceOwnership describes how host name claims + across namespaces should be handled. \n Value must be one of: + \n - Strict: Do not allow routes in different namespaces to + claim the same host. \n - InterNamespaceAllowed: Allow routes + to claim different paths of the same host name across namespaces. + \n If empty, the default is Strict." + enum: + - InterNamespaceAllowed + - Strict + type: string + wildcardPolicy: + description: "wildcardPolicy describes how routes with wildcard + policies should be handled for the ingress controller. WildcardPolicy + controls use of routes [1] exposed by the ingress controller + based on the route's wildcard policy. \n [1] https://github.com/openshift/api/blob/master/route/v1/types.go + \n Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed + will cause admitted routes with a wildcard policy of Subdomain + to stop working. These routes must be updated to a wildcard + policy of None to be readmitted by the ingress controller. \n + WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed + values. \n If empty, defaults to \"WildcardsDisallowed\"." + enum: + - WildcardsAllowed + - WildcardsDisallowed + type: string + type: object + routeSelector: + description: "routeSelector is used to filter the set of Routes serviced + by the ingress controller. This is useful for implementing shards. + \n If unset, the default is no filtering." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections + for ingresscontrollers. \n If unset, the default is based on the + apiservers.config.openshift.io/cluster resource. \n Note that when + using the Old, Intermediate, and Modern profile types, the effective + profile configuration is subject to change between releases. 
For + example, given a specification to use the Intermediate profile deployed + on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new + profile configuration to be applied to the ingress controller, resulting + in a rollout. \n Note that the minimum TLS version for ingress controllers + is 1.1, and the maximum TLS version is 1.2. An implication of this + restriction is that the Modern TLS profile type cannot be used because + it requires TLS 1.3." + properties: + custom: + description: "custom is a user-defined TLS security profile. Be + extremely careful using a custom profile as invalid configurations + can be catastrophic. An example custom profile looks like this: + \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 + \ minTLSVersion: TLSv1.1" + nullable: true + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms + that are negotiated during the TLS handshake. Operators + may remove entries their operands do not support. For example, + to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal + version of the TLS protocol that is negotiated during the + TLS handshake. For example, to use TLS versions 1.1, 1.2 + and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently + the highest minTLSVersion allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + intermediate: + description: "intermediate is a TLS security profile based on: + \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 + \ minTLSVersion: TLSv1.2" + nullable: true + type: object + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
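For the custom profile just described, a minimal sketch applied to an IngressController; the metadata values are assumed, and note that the CRD enum spells versions as VersionTLS11/VersionTLS12 even though the description's Mozilla-style example writes TLSv1.1:

apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default                          # assumed name
  namespace: openshift-ingress-operator  # assumed namespace
spec:
  tlsSecurityProfile:
    type: Custom
    custom:
      ciphers:
      - ECDHE-ECDSA-CHACHA20-POLY1305
      - ECDHE-RSA-CHACHA20-POLY1305
      - ECDHE-RSA-AES128-GCM-SHA256
      - ECDHE-ECDSA-AES128-GCM-SHA256
      minTLSVersion: VersionTLS12   # highest minTLSVersion currently allowed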
+ nullable: true + type: object + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 + \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 + \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA + \ - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 + \ - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA + \ - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - + DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 + \ - AES128-SHA256 - AES256-SHA256 - AES128-SHA - + AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + nullable: true + type: object + type: + description: "type is one of Old, Intermediate, Modern or Custom. + Custom provides the ability to specify individual TLS security + profile parameters. Old, Intermediate and Modern are TLS security + profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + \n The profiles are intent based, so they may change over time + as new ciphers are developed and existing ciphers are found + to be insecure. Depending on precisely which ciphers are available + to a process, the list may be reduced. \n Note that the Modern + profile is currently not supported because it is not yet well + adopted by common software libraries." + enum: + - Old + - Intermediate + - Modern + - Custom + type: string + type: object + tuningOptions: + description: "tuningOptions defines parameters for adjusting the performance + of ingress controller pods. All fields are optional and will use + their respective defaults if not set. See specific tuningOptions + fields for more details. \n Setting fields within tuningOptions + is generally not recommended. The default values are suitable for + most configurations." + properties: + headerBufferBytes: + description: "headerBufferBytes describes how much memory should + be reserved (in bytes) for IngressController connection sessions. + Note that this value must be at least 16384 if HTTP/2 is enabled + for the IngressController (https://tools.ietf.org/html/rfc7540). + If this field is empty, the IngressController will use a default + value of 32768 bytes. \n Setting this field is generally not + recommended as headerBufferBytes values that are too small may + break the IngressController and headerBufferBytes values that + are too large could cause the IngressController to use significantly + more memory than necessary." + format: int32 + minimum: 16384 + type: integer + headerBufferMaxRewriteBytes: + description: "headerBufferMaxRewriteBytes describes how much memory + should be reserved (in bytes) from headerBufferBytes for HTTP + header rewriting and appending for IngressController connection + sessions. Note that incoming HTTP requests will be limited to + (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning + headerBufferBytes must be greater than headerBufferMaxRewriteBytes. + If this field is empty, the IngressController will use a default + value of 8192 bytes. 
\n Setting this field is generally not + recommended as headerBufferMaxRewriteBytes values that are too + small may break the IngressController and headerBufferMaxRewriteBytes + values that are too large could cause the IngressController + to use significantly more memory than necessary." + format: int32 + minimum: 4096 + type: integer + threadCount: + description: "threadCount defines the number of threads created + per HAProxy process. Creating more threads allows each ingress + controller pod to handle more connections, at the cost of more + system resources being used. HAProxy currently supports up to + 64 threads. If this field is empty, the IngressController will + use the default value. The current default is 4 threads, but + this may change in future releases. \n Setting this field is + generally not recommended. Increasing the number of HAProxy + threads allows ingress controller pods to utilize more CPU time + under load, potentially starving other pods if set too high. + Reducing the number of threads may cause the ingress controller + to perform poorly." + format: int32 + maximum: 64 + minimum: 1 + type: integer + type: object + unsupportedConfigOverrides: + description: unsupportedConfigOverrides allows specifying unsupported + configuration options. Its use is unsupported. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the IngressController. + properties: + availableReplicas: + description: availableReplicas is the number of observed available replicas + according to the ingress controller deployment. + format: int32 + type: integer + conditions: + description: "conditions is a list of conditions and their status. + \n Available means the ingress controller deployment is available + and servicing route and ingress resources (i.e., .status.availableReplicas + equals .spec.replicas) \n There are additional conditions which + indicate the status of other ingress controller features and capabilities. + \n * LoadBalancerManaged - True if the following conditions + are met: * The endpoint publishing strategy requires a service + load balancer. - False if any of those conditions are unsatisfied. + \n * LoadBalancerReady - True if the following conditions are + met: * A load balancer is managed. * The load balancer is + ready. - False if any of those conditions are unsatisfied. \n + \ * DNSManaged - True if the following conditions are met: * + The endpoint publishing strategy and platform support DNS. * + The ingress controller domain is set. * dns.config.openshift.io/cluster + configures DNS zones. - False if any of those conditions are unsatisfied. + \n * DNSReady - True if the following conditions are met: * + DNS is managed. * DNS records have been successfully created. + \ - False if any of those conditions are unsatisfied." + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + domain: + description: domain is the actual domain in use. + type: string + endpointPublishingStrategy: + description: endpointPublishingStrategy is the actual strategy in + use. + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork + endpoint publishing strategy. Present only if type is HostNetwork.
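A hedged sketch of the tuningOptions fields described above; the values shown are simply the documented defaults, not a recommendation (metadata assumed):

apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default                          # assumed name
  namespace: openshift-ingress-operator  # assumed namespace
spec:
  tuningOptions:
    headerBufferBytes: 32768             # documented default; must be >= 16384 with HTTP/2
    headerBufferMaxRewriteBytes: 8192    # documented default; minimum 4096
    threadCount: 4                       # current documented default; maximum 64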
+ properties: + protocol: + description: "protocol specifies whether the IngressController + expects incoming connections to use plain TCP or whether + the IngressController expects PROXY protocol. \n PROXY protocol + can be used with load balancers that support it to communicate + the source addresses of client connections when forwarding + those connections to the IngressController. Using PROXY + protocol enables the IngressController to report those source + addresses instead of reporting the load balancer's address + in HTTP headers and logs. Note that enabling PROXY protocol + on the IngressController will cause connections to fail + if you are not using a load balancer that uses PROXY protocol + to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt + for information about PROXY protocol. \n The following values + are valid for this field: \n * The empty string. * \"TCP\". + * \"PROXY\". \n The empty string specifies the default, + which is TCP without PROXY protocol. Note that the default + is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. + Present only if type is LoadBalancerService. + properties: + providerParameters: + description: "providerParameters holds desired load balancer + information specific to the underlying infrastructure provider. + \n If empty, defaults will be applied. See specific providerParameters + fields for details about their defaults." + properties: + aws: + description: "aws provides configuration settings that + are specific to AWS load balancers. \n If empty, defaults + will be applied. See specific aws fields for details + about their defaults." + properties: + classicLoadBalancer: + description: classicLoadBalancerParameters holds configuration + parameters for an AWS classic load balancer. Present + only if type is Classic. + type: object + networkLoadBalancer: + description: networkLoadBalancerParameters holds configuration + parameters for an AWS network load balancer. Present + only if type is NLB. + type: object + type: + description: "type is the type of AWS load balancer + to instantiate for an ingresscontroller. \n Valid + values are: \n * \"Classic\": A Classic Load Balancer + that makes routing decisions at either the transport + layer (TCP/SSL) or the application layer (HTTP/HTTPS). + See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + \n * \"NLB\": A Network Load Balancer that makes + routing decisions at the transport layer (TCP/SSL). + See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - Classic + - NLB + type: string + required: + - type + type: object + gcp: + description: "gcp provides configuration settings that + are specific to GCP load balancers. \n If empty, defaults + will be applied. See specific gcp fields for details + about their defaults." + properties: + clientAccess: + description: "clientAccess describes how client access + is restricted for internal load balancers. \n Valid + values are: * \"Global\": Specifying an internal + load balancer with Global client access allows + clients from any region within the VPC to communicate + with the load balancer. 
\n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + \n * \"Local\": Specifying an internal load balancer + with Local client access means only clients within + the same region (and VPC) as the GCP load balancer + \ can communicate with the load balancer. Note + that this is the default behavior. \n https://cloud.google.com/load-balancing/docs/internal#client_access" + enum: + - Global + - Local + type: string + type: object + type: + description: type is the underlying infrastructure provider + for the load balancer. Allowed values are "AWS", "Azure", + "BareMetal", "GCP", "OpenStack", "VSphere", and "IBM". + enum: + - AWS + - Azure + - BareMetal + - GCP + - OpenStack + - VSphere + - IBM + type: string + required: + - type + type: object + scope: + description: scope indicates the scope at which the load balancer + is exposed. Possible values are "External" and "Internal". + enum: + - Internal + - External + type: string + required: + - scope + type: object + nodePort: + description: nodePort holds parameters for the NodePortService + endpoint publishing strategy. Present only if type is NodePortService. + properties: + protocol: + description: "protocol specifies whether the IngressController + expects incoming connections to use plain TCP or whether + the IngressController expects PROXY protocol. \n PROXY protocol + can be used with load balancers that support it to communicate + the source addresses of client connections when forwarding + those connections to the IngressController. Using PROXY + protocol enables the IngressController to report those source + addresses instead of reporting the load balancer's address + in HTTP headers and logs. Note that enabling PROXY protocol + on the IngressController will cause connections to fail + if you are not using a load balancer that uses PROXY protocol + to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt + for information about PROXY protocol. \n The following values + are valid for this field: \n * The empty string. * \"TCP\". + * \"PROXY\". \n The empty string specifies the default, + which is TCP without PROXY protocol. Note that the default + is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + private: + description: private holds parameters for the Private endpoint + publishing strategy. Present only if type is Private. + type: object + type: + description: "type is the publishing strategy to use. Valid values + are: \n * LoadBalancerService \n Publishes the ingress controller + using a Kubernetes LoadBalancer Service. \n In this configuration, + the ingress controller deployment uses container networking. + A LoadBalancer Service is created to publish the deployment. + \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + \n If domain is set, a wildcard DNS record will be managed to + point at the LoadBalancer Service's external name. DNS records + are managed only in DNS zones defined by dns.config.openshift.io/cluster + .spec.publicZone and .spec.privateZone. \n Wildcard DNS management + is currently supported only on the AWS, Azure, and GCP platforms. + \n * HostNetwork \n Publishes the ingress controller on node + ports where the ingress controller is deployed. \n In this configuration, + the ingress controller deployment uses host networking, bound + to node ports 80 and 443.
The user is responsible for configuring + an external load balancer to publish the ingress controller + via the node ports. \n * Private \n Does not publish the ingress + controller. \n In this configuration, the ingress controller + deployment uses container networking, and is not explicitly + published. The user must manually publish the ingress controller. + \n * NodePortService \n Publishes the ingress controller using + a Kubernetes NodePort Service. \n In this configuration, the + ingress controller deployment uses container networking. A NodePort + Service is created to publish the deployment. The specific node + ports are dynamically allocated by OpenShift; however, to support + static port allocations, user changes to the node port field + of the managed NodePort Service will be preserved." + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + type: string + required: + - type + type: object + observedGeneration: + description: observedGeneration is the most recent generation observed. + format: int64 + type: integer + selector: + description: selector is a label selector, in string format, for ingress + controller pods corresponding to the IngressController. The number + of matching pods should equal the value of availableReplicas. + type: string + tlsProfile: + description: tlsProfile is the TLS connection configuration that is + in effect. + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms + that are negotiated during the TLS handshake. Operators may + remove entries their operands do not support. For example, + to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version + of the TLS protocol that is negotiated during the TLS handshake.
+ For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n + \ minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion + allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.availableReplicas + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch new file mode 100644 index 0000000000..6076c3a31b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch @@ -0,0 +1,9 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/logging/properties/access/properties/destination/properties/syslog/oneOf + value: + - properties: + address: + format: ipv4 + - properties: + address: + format: ipv6 diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml new file mode 100644 index 0000000000..656b6184e7 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml @@ -0,0 +1,135 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: servicecas.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ServiceCA + listKind: ServiceCAList + plural: servicecas + singular: serviceca + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ServiceCA provides information to configure an operator to manage the service cert controllers + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
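To make the ServiceCA spec fields above concrete, a minimal sketch; the singleton resource name "cluster" is an assumption based on the usual operator-config convention:

apiVersion: operator.openshift.io/v1
kind: ServiceCA
metadata:
  name: cluster        # assumed singleton name
spec:
  managementState: Managed
  logLevel: Normal
  operatorLogLevel: Normal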
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that the controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override; it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml new file mode 100644 index 0000000000..5918e6dd4b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml @@ -0,0 +1,443 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: networks.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSpec is the top-level network configuration object. + type: object + properties: + additionalNetworks: + description: additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled. + type: array + items: + description: AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one "Config" that matches the type. + type: object + properties: + name: + description: name is the name of the network. This will be populated in the resulting CRD. This must be unique.
+ type: string + namespace: + description: namespace is the namespace of the network. This will be populated in the resulting CRD. If not given, the network will be created in the default namespace. + type: string + rawCNIConfig: + description: rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD + type: string + simpleMacvlanConfig: + description: SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan + type: object + properties: + ipamConfig: + description: IPAMConfig configures the IPAM module that will be used for IP Address Management (IPAM). + type: object + properties: + staticIPAMConfig: + description: StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic + type: object + properties: + addresses: + description: Addresses configures IP addresses for the interface + type: array + items: + description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses + type: object + properties: + address: + description: Address is the IP address in CIDR format + type: string + gateway: + description: Gateway is an IP inside of the subnet to designate as the gateway + type: string + dns: + description: DNS configures DNS for the interface + type: object + properties: + domain: + description: Domain configures the domain name (the local domain) used for short hostname lookups + type: string + nameservers: + description: Nameservers points to DNS servers for IP lookup + type: array + items: + type: string + search: + description: Search configures priority ordered search domains for short hostname lookups + type: array + items: + type: string + routes: + description: Routes configures IP routes for the interface + type: array + items: + description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes + type: object + properties: + destination: + description: Destination points to the IP route destination + type: string + gateway: + description: Gateway is the route's next-hop IP address. If unset, a default gateway is assumed (as determined by the CNI plugin). + type: string + type: + description: Type is the type of IPAM module that will be used for IP Address Management (IPAM). The supported values are IPAMTypeDHCP and IPAMTypeStatic + type: string + master: + description: master is the host interface to create the macvlan interface from. If not specified, it will be the default route interface + type: string + mode: + description: 'mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge' + type: string + mtu: + description: mtu is the mtu to use for the macvlan interface. If unset, the host's kernel will select the value. + type: integer + format: int32 + minimum: 0 + type: + description: type is the type of network. The supported values are NetworkTypeRaw and NetworkTypeSimpleMacvlan + type: string + clusterNetwork: + description: clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr. + type: array + items: + description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset.
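A minimal sketch of an additionalNetworks entry exercising the simpleMacvlanConfig static-IPAM fields above; all concrete values are illustrative, and the string values "SimpleMacvlan" and "Static" are assumed from the NetworkType/IPAMType constant names:

apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster                  # assumed singleton name
spec:
  additionalNetworks:
  - name: macvlan-net            # illustrative network name
    namespace: example-ns        # illustrative namespace
    type: SimpleMacvlan          # assumed string value for NetworkTypeSimpleMacvlan
    simpleMacvlanConfig:
      master: eth0               # illustrative host interface
      mode: bridge               # documented default mode
      ipamConfig:
        type: Static             # assumed string value for IPAMTypeStatic
        staticIPAMConfig:
          addresses:
          - address: 192.168.1.10/24
            gateway: 192.168.1.1
          routes:
          - destination: 0.0.0.0/0
            gateway: 192.168.1.1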
Not all network providers support multiple ClusterNetworks + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + defaultNetwork: + description: defaultNetwork is the "default" network that all pods will receive + type: object + properties: + kuryrConfig: + description: KuryrConfig configures the kuryr plugin + type: object + properties: + controllerProbesPort: + description: The port kuryr-controller will listen on for readiness and liveness requests. + type: integer + format: int32 + minimum: 0 + daemonProbesPort: + description: The port kuryr-daemon will listen on for readiness and liveness requests. + type: integer + format: int32 + minimum: 0 + enablePortPoolsPrepopulation: + description: enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to offset the fact that it takes a significant amount of time to create one port. Instead of creating it when a pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By default port prepopulation is disabled. + type: boolean + mtu: + description: mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. + type: integer + format: int32 + minimum: 0 + openStackServiceNetwork: + description: openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectively preventing conflicts. If not set, cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1. + type: string + poolBatchPorts: + description: poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting. + type: integer + minimum: 0 + poolMaxPorts: + description: poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting. + type: integer + minimum: 0 + poolMinPorts: + description: poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting.
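For the Kuryr port-pool knobs above, a hedged sketch; the defaultNetwork type string "Kuryr" is assumed from the NetworkType constant name, and the numeric values shown are the documented defaults:

apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster          # assumed singleton name
spec:
  defaultNetwork:
    type: Kuryr          # assumed string value for the Kuryr NetworkType
    kuryrConfig:
      enablePortPoolsPrepopulation: true
      poolBatchPorts: 3  # documented default batch size
      poolMinPorts: 1    # documented default lower bound
      poolMaxPorts: 0    # 0 disables the upper bound (documented default)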
+ type: integer + minimum: 1 + openshiftSDNConfig: + description: openShiftSDNConfig configures the openshift-sdn plugin + type: object + properties: + enableUnidling: + description: enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled. + type: boolean + mode: + description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + type: string + mtu: + description: mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink. + type: integer + format: int32 + minimum: 0 + useExternalOpenvswitch: + description: 'useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6' + type: boolean + vxlanPort: + description: vxlanPort is the port to use for all vxlan packets. The default is 4789. + type: integer + format: int32 + minimum: 0 + ovnKubernetesConfig: + description: ovnKubernetesConfig configures the ovn-kubernetes plugin. This is currently not implemented. + type: object + properties: + genevePort: + description: genevePort is the UDP port to be used by geneve encapsulation. Default is 6081 + type: integer + format: int32 + minimum: 1 + hybridOverlayConfig: + description: HybridOverlayConfig configures an additional overlay network for peers that are not using OVN. + type: object + properties: + hybridClusterNetwork: + description: HybridClusterNetwork defines a network space given to nodes on an additional overlay network. + type: array + items: + description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + hybridOverlayVXLANPort: + description: HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789 + type: integer + format: int32 + ipsecConfig: + description: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. + type: object + mtu: + description: mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400 + type: integer + format: int32 + minimum: 0 + policyAuditConfig: + description: policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used. + type: object + properties: + destination: + description: 'destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/; additionally, syslog output may be configured as follows.
Valid values are: - "libc" -> to use the libc syslog() function of the host node''s journald process - "udp:host:port" -> for sending syslog over UDP - "unix:file" -> for using the UNIX domain socket directly - "null" -> to discard all messages logged to syslog The default is "null"' + type: string + default: "null" + maxFileSize: + description: maxFileSize is the max size an ACL_audit log file is allowed to reach before rotation occurs. Units are in MB and the default is 50 MB + type: integer + format: int32 + default: 50 + minimum: 1 + rateLimit: + description: rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset, the default of 20 msg/sec is used. + type: integer + format: int32 + default: 20 + minimum: 1 + syslogFacility: + description: syslogFacility is the RFC5424 facility for generated messages, e.g. "kern". Default is "local0" + type: string + default: local0 + type: + description: type is the type of network. All NetworkTypes are supported except for NetworkTypeRaw + type: string + deployKubeProxy: + description: deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise. + type: boolean + disableMultiNetwork: + description: disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled. + type: boolean + disableNetworkDiagnostics: + description: disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks. + type: boolean + default: false + exportNetworkFlows: + description: exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using the protocols NetFlow, SFlow or IPFIX. Currently only supported on the OVN-Kubernetes plugin. If unset, flows will not be exported to any collector. + type: object + properties: + ipfix: + description: ipfix defines IPFIX configuration. + type: object + properties: + collectors: + description: ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + netFlow: + description: netFlow defines the NetFlow configuration. + type: object + properties: + collectors: + description: netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + sFlow: + description: sFlow defines the SFlow configuration.
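A sketch combining the OVN-Kubernetes policy-audit and flow-export fields described above; the collector addresses are illustrative, and the audit values shown are the documented defaults:

apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster                  # assumed singleton name
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      genevePort: 6081           # documented default
      mtu: 1400                  # documented default
      policyAuditConfig:
        destination: "udp:10.0.0.5:514"   # illustrative udp:host:port collector
        maxFileSize: 50
        rateLimit: 20
        syslogFacility: local0
  exportNetworkFlows:
    netFlow:
      collectors:
      - 192.0.2.10:2056          # illustrative ip:port; must match the documented pattern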
+ type: object + properties: + collectors: + description: sFlowCollectors is a list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + kubeProxyConfig: + description: kubeProxyConfig lets us configure the desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn. + type: object + properties: + bindAddress: + description: The address to "bind" on. Defaults to 0.0.0.0 + type: string + iptablesSyncPeriod: + description: 'An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s' + type: string + proxyArguments: + description: Any additional arguments to pass to the kubeproxy process + type: object + additionalProperties: + description: ProxyArgumentList is a list of arguments to pass to the kubeproxy process + type: array + items: + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + migration: + description: migration enables and configures the cluster network migration. Set this field to the target network type to allow changing the default network. If unset, the operation of changing the cluster default network plugin will be rejected. + type: object + properties: + networkType: + description: networkType is the target type of network migration. The supported values are OpenShiftSDN and OVNKubernetes + type: string + observedConfig: + description: observedConfig holds a sparse config that the controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + serviceNetwork: + description: serviceNetwork is the IP address pool to use for Service IPs. Currently, all existing network providers only support a single value here, but this is an array to allow for growth. + type: array + items: + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options.
It only needs to be the fields to override; it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + useMultiNetworkPolicy: + description: useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy objects are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored. + type: boolean + status: + description: NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml new file mode 100644 index 0000000000..e3afdf7d6c --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml @@ -0,0 +1,275 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/486 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoles.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Console + listKind: 
ConsoleList + plural: consoles + singular: console + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Console provides a means to configure an operator to manage the console. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleSpec is the specification of the desired behavior of the Console. + type: object + properties: + customization: + description: customization is used to optionally provide a small set of customization options to the web console. + type: object + properties: + addPage: + description: addPage allows customizing actions on the Add page in the developer perspective. + type: object + properties: + disabledActions: + description: disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID. + type: array + minItems: 1 + items: + type: string + brand: + description: brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. An invalid value will prevent a console rollout. + type: string + pattern: ^$|^(ocp|origin|okd|dedicated|online|azure)$ + customLogoFile: + description: 'customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like ''oc create configmap custom-logo --from-file=/path/to/file -n openshift-config''. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: max height of 68px and max width of 200px; SVG format preferred' + type: object + properties: + key: + description: Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + type: string + name: + type: string + customProductName: + description: customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name. + type: string + developerCatalog: + description: developerCatalog allows configuring which developer catalog categories are shown. + type: object + properties: + categories: + description: categories which are shown in the developer catalog. + type: array + items: + description: DeveloperConsoleCatalogCategory for the developer console catalog. + type: object + required: + - id + - label + properties: + id: + description: ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.
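A minimal sketch of the Console customization fields above; the name "cluster" is assumed, and the category is illustrative (its id must match the documented pattern, its label the length limits):

apiVersion: operator.openshift.io/v1
kind: Console
metadata:
  name: cluster                        # assumed singleton name
spec:
  customization:
    brand: okd                         # must match the documented brand pattern
    customProductName: Example Console # illustrative
    developerCatalog:
      categories:
      - id: languages                  # 1-32 URL-safe characters
        label: Languages               # 1-64 characters
        tags:
        - java
        - python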
+ type: string + maxLength: 32 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + label: + description: label defines a category display label. It is required and must have 1-64 characters. + type: string + maxLength: 64 + minLength: 1 + subcategories: + description: subcategories defines a list of child categories. + type: array + items: + description: DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. + type: object + required: + - id + - label + properties: + id: + description: ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. + type: string + maxLength: 32 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + label: + description: label defines a category display label. It is required and must have 1-64 characters. + type: string + maxLength: 64 + minLength: 1 + tags: + description: tags is a list of strings that will match the category. A selected category shows all items which have at least one overlapping tag between the category and the item. + type: array + items: + type: string + tags: + description: tags is a list of strings that will match the category. A selected category shows all items which have at least one overlapping tag between the category and the item. + type: array + items: + type: string + documentationBaseURL: + description: documentationBaseURL links to external documentation that is shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. An invalid value will prevent a console rollout. + type: string + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$ + projectAccess: + description: projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options. + type: object + properties: + availableClusterRoles: + description: availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab. + type: array + items: + type: string + quickStarts: + description: quickStarts allows customization of available ConsoleQuickStart resources in console. + type: object + properties: + disabled: + description: disabled is a list of ConsoleQuickStart resource names that are not shown to users. + type: array + items: + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that the controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself.
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + plugins: + description: plugins defines a list of enabled console plugin names. + type: array + items: + type: string + providers: + description: providers contains configuration for using specific service providers. + type: object + properties: + statuspage: + description: statuspage contains the ID for the statuspage.io page that provides status info. + type: object + properties: + pageID: + description: pageID is the unique ID assigned by Statuspage for your page. This must be a public page. + type: string + route: + description: route contains the hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. In case the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. In case the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, the default route will be used. DEPRECATED + type: object + properties: + hostname: + description: hostname is the desired custom domain under which console will be available. + type: string + secret: + description: 'secret points to a secret in the openshift-config namespace that contains the custom certificate and key and needs to be created manually by the cluster admin. Referenced Secret is required to contain the following key value pairs: - "tls.crt" - to specify the custom certificate - "tls.key" - to specify the private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override; it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: ConsoleStatus defines the observed status of the Console. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml new file mode 100644 index 0000000000..4e7032414d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml @@ -0,0 +1,128 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: dnses.operator.openshift.io + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" +spec: + group: operator.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + description: "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster. \n This supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md \n More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns" + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the DNS. + type: object + properties: + nodePlacement: + description: "nodePlacement provides explicit control over the scheduling of DNS pods. 
\n Generally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint. \n If unset, defaults are used. See nodePlacement for more details." + type: object + properties: + nodeSelector: + description: "nodeSelector is the node selector applied to DNS pods. \n If empty, the default is used, which is currently the following: \n kubernetes.io/os: linux \n This default is subject to change. \n If set, the specified selector is used and replaces the default." + type: object + additionalProperties: + type: string + tolerations: + description: "tolerations is a list of tolerations applied to DNS pods. \n If empty, the DNS operator sets a toleration for the \"node-role.kubernetes.io/master\" taint. This default is subject to change. Specifying tolerations without including a toleration for the \"node-role.kubernetes.io/master\" taint may be risky as it could lead to an outage if all worker nodes become unavailable. \n Note that the daemon controller adds some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/" + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>. + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + servers: + description: "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server. \n For example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\". \n If this field is nil, no servers are created." 
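The longest-suffix-match behavior described above is easiest to see with concrete values. Below is a minimal sketch using the Go types vendored later in this diff (types_dns.go); the server names and upstream addresses are illustrative, not defaults.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// With both zones configured, a query for "www.a.foo.com" is delegated to
	// the "a.foo.com" server, because the longest matching suffix wins.
	spec := operatorv1.DNSSpec{
		Servers: []operatorv1.Server{
			{
				Name:          "foo", // illustrative name
				Zones:         []string{"foo.com"},
				ForwardPlugin: operatorv1.ForwardPlugin{Upstreams: []string{"1.2.3.4"}},
			},
			{
				Name:          "a-foo",
				Zones:         []string{"a.foo.com"},
				// IP:port form, used when the upstream listens on a port other than 53.
				ForwardPlugin: operatorv1.ForwardPlugin{Upstreams: []string{"5.6.7.8:5353"}},
			},
		},
	}
	fmt.Println(len(spec.Servers)) // 2
}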
+ type: array + items: + description: Server defines the schema for a server that runs per instance of CoreDNS. + type: object + properties: + forwardPlugin: + description: forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers. + type: object + properties: + upstreams: + description: "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Upstreams are randomized when more than 1 upstream is specified. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53. \n A maximum of 15 upstreams is allowed per ForwardPlugin." + type: array + maxItems: 15 + items: + type: string + name: + description: name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335. + type: string + zones: + description: zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., "cluster.local") is invalid. + type: array + items: + type: string + status: + description: status is the most recently observed status of the DNS. + type: object + required: + - clusterDomain + - clusterIP + properties: + clusterDomain: + description: "clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\" \n More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service" + type: string + clusterIP: + description: "clusterIP is the service IP through which this DNS is made available. \n In the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy. \n In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. Example: dig foo.com @<service IP> \n More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + type: string + conditions: + description: "conditions provide information about the state of the DNS on the cluster. \n These are the supported DNS conditions: \n * Available - True if the following conditions are met: * DNS controller daemonset is available. - False if any of those conditions are unsatisfied." + type: array + items: + description: OperatorCondition is just the standard condition fields. 
+ type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml new file mode 100644 index 0000000000..88fa22450c --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml @@ -0,0 +1,134 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/562 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: csisnapshotcontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: CSISnapshotController + plural: csisnapshotcontrollers + singular: csisnapshotcontroller + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to contain the fields to override; it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml new file mode 100644 index 0000000000..1cc88838a1 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml @@ -0,0 +1,178 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/701 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clustercsidrivers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ClusterCSIDriver + plural: clustercsidrivers + singular: clustercsidriver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterCSIDriver object allows management and configuration of + a CSI driver operator installed by default in OpenShift. 
Name of the object + must be the name of the CSI driver it operates. See CSIDriverName type for list + of allowed values. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + enum: + - ebs.csi.aws.com + - efs.csi.aws.com + - disk.csi.azure.com + - pd.csi.storage.gke.io + - cinder.csi.openstack.org + - csi.vsphere.vmware.com + - manila.csi.openstack.org + - csi.ovirt.org + - csi.kubevirt.io + - csi.shared-resources.openshift.io + type: string + type: object + spec: + description: spec holds user settable values for configuration + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to contain the + fields to override; it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch new file mode 100644 index 0000000000..508903a8ca --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch @@ -0,0 +1,16 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/metadata/properties + value: + name: + type: string + enum: + - ebs.csi.aws.com + - efs.csi.aws.com + - disk.csi.azure.com + - pd.csi.storage.gke.io + - cinder.csi.openstack.org + - csi.vsphere.vmware.com + - manila.csi.openstack.org + - csi.ovirt.org + - csi.kubevirt.io + - csi.shared-resources.openshift.io diff --git a/vendor/github.com/openshift/api/operator/v1/doc.go b/vendor/github.com/openshift/api/operator/v1/doc.go new file mode 100644 index 0000000000..3de961a7fc --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=operator.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go new file mode 100644 index 0000000000..71727a824c --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/register.go @@ -0,0 +1,76 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, 
configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &Authentication{}, + &AuthenticationList{}, + &DNS{}, + &DNSList{}, + &CloudCredential{}, + &CloudCredentialList{}, + &ClusterCSIDriver{}, + &ClusterCSIDriverList{}, + &Console{}, + &ConsoleList{}, + &CSISnapshotController{}, + &CSISnapshotControllerList{}, + &Etcd{}, + &EtcdList{}, + &KubeAPIServer{}, + &KubeAPIServerList{}, + &KubeControllerManager{}, + &KubeControllerManagerList{}, + &KubeScheduler{}, + &KubeSchedulerList{}, + &KubeStorageVersionMigrator{}, + &KubeStorageVersionMigratorList{}, + &Network{}, + &NetworkList{}, + &OpenShiftAPIServer{}, + &OpenShiftAPIServerList{}, + &OpenShiftControllerManager{}, + &OpenShiftControllerManagerList{}, + &ServiceCA{}, + &ServiceCAList{}, + &ServiceCatalogAPIServer{}, + &ServiceCatalogAPIServerList{}, + &ServiceCatalogControllerManager{}, + &ServiceCatalogControllerManagerList{}, + &IngressController{}, + &IngressControllerList{}, + &Storage{}, + &StorageList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go new file mode 100644 index 0000000000..c4586ad317 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -0,0 +1,227 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// MyOperatorResource is an example operator configuration type +type MyOperatorResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec MyOperatorResourceSpec `json:"spec"` + Status MyOperatorResourceStatus `json:"status"` +} + +type MyOperatorResourceSpec struct { + OperatorSpec `json:",inline"` +} + +type MyOperatorResourceStatus struct { + OperatorStatus `json:",inline"` +} + +// +kubebuilder:validation:Pattern=`^(Managed|Unmanaged|Force|Removed)$` +type ManagementState string + +var ( + // Force means that the operator is actively managing its resources but will not block an upgrade + // if unmet prereqs exist. This state puts the operator at risk for unsuccessful upgrades + Force ManagementState = "Force" + // Managed means that the operator is actively managing its resources and trying to keep the component active. + // It will only upgrade the component if it is safe to do so + Managed ManagementState = "Managed" + // Unmanaged means that the operator will not take any action related to the component + // Some operators might not support this management state as it might damage the cluster and lead to manual recovery. 
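The Install function in register.go above is the intended entry point for consumers of these types. Below is a minimal sketch, assuming only the vendored packages shown in this diff, of registering the group into a runtime.Scheme and instantiating a type by GroupVersionKind.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install adds the operator.openshift.io/v1 types to the scheme (and the
	// config.openshift.io/v1 types, via configv1.Install in the scheme builder).
	if err := operatorv1.Install(scheme); err != nil {
		panic(err)
	}
	// With the types registered, objects can be created from their GVK.
	obj, err := scheme.New(operatorv1.GroupVersion.WithKind("Console"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // *v1.Console
}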
+ Unmanaged ManagementState = "Unmanaged" + // Removed means that the operator is actively managing its resources and trying to remove all traces of the component + // Some operators (like kube-apiserver-operator) might not support this management state as removing the API server will + // brick the cluster. + Removed ManagementState = "Removed" +) + +// OperatorSpec contains common fields operators need. It is intended to be anonymously included +// inside of the Spec struct for your particular operator. +type OperatorSpec struct { + // managementState indicates whether and how the operator should manage the component + ManagementState ManagementState `json:"managementState"` + + // logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for their operands. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + LogLevel LogLevel `json:"logLevel,omitempty"` + + // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for themselves. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + OperatorLogLevel LogLevel `json:"operatorLogLevel,omitempty"` + + // unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to contain the fields to override; + // it will end up overlaying in the following order: + // 1. hardcoded defaults + // 2. observedConfig + // 3. unsupportedConfigOverrides + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` + + // observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + // it is an input to the level for the operator + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + ObservedConfig runtime.RawExtension `json:"observedConfig"` +} + +// +kubebuilder:validation:Enum="";Normal;Debug;Trace;TraceAll +type LogLevel string + +var ( + // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2. + Normal LogLevel = "Normal" + + // Debug is used when something went wrong. Even common operations may be logged, with a greater quantity of less helpful notices. In kube, this is probably glog=4. + Debug LogLevel = "Debug" + + // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6. + Trace LogLevel = "Trace" + + // TraceAll is used when something is broken at the level of API content/decoding. It will dump complete body content. If you turn this on in a production cluster + // prepare for serious performance issues and massive amounts of logs. In kube, this is probably glog=8. 
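The glog verbosities suggested in the comments above (2/4/6/8) translate naturally into a lookup. A sketch of such a mapping follows; the verbosity helper is illustrative and not part of the vendored API.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

// verbosity maps an operator LogLevel to the approximate glog/klog verbosity
// suggested above: Normal~2, Debug~4, Trace~6, TraceAll~8. Hypothetical helper.
func verbosity(l operatorv1.LogLevel) int {
	switch l {
	case operatorv1.Debug:
		return 4
	case operatorv1.Trace:
		return 6
	case operatorv1.TraceAll:
		return 8
	default: // "" and Normal both default to Normal behavior
		return 2
	}
}

func main() {
	fmt.Println(verbosity(operatorv1.Trace)) // 6
}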
+ TraceAll LogLevel = "TraceAll" +) + +type OperatorStatus struct { + // observedGeneration is the last generation change you've dealt with + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // conditions is a list of conditions and their status + // +optional + Conditions []OperatorCondition `json:"conditions,omitempty"` + + // version is the level this availability applies to + // +optional + Version string `json:"version,omitempty"` + + // readyReplicas indicates how many replicas are ready and at the desired state + ReadyReplicas int32 `json:"readyReplicas"` + + // generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + // +optional + Generations []GenerationStatus `json:"generations,omitempty"` +} + +// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +type GenerationStatus struct { + // group is the group of the thing you're tracking + Group string `json:"group"` + // resource is the resource type of the thing you're tracking + Resource string `json:"resource"` + // namespace is where the thing you're tracking is + Namespace string `json:"namespace"` + // name is the name of the thing you're tracking + Name string `json:"name"` + // lastGeneration is the last generation of the workload controller involved + LastGeneration int64 `json:"lastGeneration"` + // hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + Hash string `json:"hash"` +} + +var ( + // Available indicates that the operand is present and accessible in the cluster + OperatorStatusTypeAvailable = "Available" + // Progressing indicates that the operator is trying to transition the operand to a different state + OperatorStatusTypeProgressing = "Progressing" + // Degraded indicates that the operator (not the operand) is unable to fulfill the user intent + OperatorStatusTypeDegraded = "Degraded" + // PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the + // current and desired states. + OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied" + // Upgradeable indicates that the operator configuration itself (not prereqs) can be auto-upgraded by the CVO + OperatorStatusTypeUpgradeable = "Upgradeable" +) + +// OperatorCondition is just the standard condition fields. +type OperatorCondition struct { + Type string `json:"type"` + Status ConditionStatus `json:"status"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty"` +} + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// StaticPodOperatorSpec is spec for controllers that manage static pods. +type StaticPodOperatorSpec struct { + OperatorSpec `json:",inline"` + + // forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + // this time instead of failing again on the same config. 
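+	// An illustrative (hypothetical) value: "manual-retry-2021-03-01"; any new,
+	// unique string causes a fresh revision of the operand to be rolled out.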
+ ForceRedeploymentReason string `json:"forceRedeploymentReason"` + + // failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + FailedRevisionLimit int32 `json:"failedRevisionLimit,omitempty"` + // succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + SucceededRevisionLimit int32 `json:"succeededRevisionLimit,omitempty"` +} + +// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual +// node status must be tracked. +type StaticPodOperatorStatus struct { + OperatorStatus `json:",inline"` + + // latestAvailableRevision is the deploymentID of the most recent deployment + // +optional + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` + + // latestAvailableRevisionReason describes the detailed reason for the most recent deployment + // +optional + LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"` + + // nodeStatuses track the deployment values and errors across individual nodes + // +optional + NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"` +} + +// NodeStatus provides information about the current state of a particular node managed by this operator. +type NodeStatus struct { + // nodeName is the name of the node + NodeName string `json:"nodeName"` + + // currentRevision is the generation of the most recently successful deployment + CurrentRevision int32 `json:"currentRevision"` + // targetRevision is the generation of the deployment we're trying to apply + TargetRevision int32 `json:"targetRevision,omitempty"` + + // lastFailedRevision is the generation of the deployment we tried and failed to deploy. + LastFailedRevision int32 `json:"lastFailedRevision,omitempty"` + // lastFailedTime is the time the last failed revision failed most recently. + LastFailedTime *metav1.Time `json:"lastFailedTime,omitempty"` + // lastFailedCount is how often the last failed revision failed. + LastFailedCount int `json:"lastFailedCount,omitempty"` + // lastFailedRevisionErrors is a list of the errors during the failed deployment referenced in lastFailedRevision + LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go new file mode 100644 index 0000000000..61c777cf26 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -0,0 +1,51 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Authentication provides information to configure an operator to manage authentication. 
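The revision bookkeeping in StaticPodOperatorStatus and NodeStatus above supports a simple convergence check: every node's currentRevision should eventually reach latestAvailableRevision. A sketch of such a check follows; nodesAtLatestRevision is a hypothetical helper, not part of the vendored API.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

// nodesAtLatestRevision reports whether every tracked node has converged on
// the most recently available revision. Illustrative only.
func nodesAtLatestRevision(s operatorv1.StaticPodOperatorStatus) bool {
	for _, n := range s.NodeStatuses {
		if n.CurrentRevision != s.LatestAvailableRevision {
			return false
		}
	}
	return true
}

func main() {
	s := operatorv1.StaticPodOperatorStatus{
		LatestAvailableRevision: 3,
		NodeStatuses: []operatorv1.NodeStatus{
			{NodeName: "master-0", CurrentRevision: 3},
			{NodeName: "master-1", CurrentRevision: 2, TargetRevision: 3},
		},
	}
	fmt.Println(nodesAtLatestRevision(s)) // false: master-1 is still rolling out
}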
+type Authentication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec AuthenticationSpec `json:"spec,omitempty"` + // +optional + Status AuthenticationStatus `json:"status,omitempty"` +} + +type AuthenticationSpec struct { + OperatorSpec `json:",inline"` +} + +type AuthenticationStatus struct { + // OAuthAPIServer holds status specific only to oauth-apiserver + // +optional + OAuthAPIServer OAuthAPIServerStatus `json:"oauthAPIServer,omitempty"` + + OperatorStatus `json:",inline"` +} + +type OAuthAPIServerStatus struct { + // LatestAvailableRevision is the latest revision used as suffix of revisioned + // secrets like encryption-config. A new revision causes a new deployment of pods. + // +optional + // +kubebuilder:validation:Minimum=0 + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationList is a collection of items +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go new file mode 100644 index 0000000000..8d1806cd6c --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go @@ -0,0 +1,76 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CloudCredential provides a means to configure an operator to manage CredentialsRequests. +type CloudCredential struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec CloudCredentialSpec `json:"spec"` + // +optional + Status CloudCredentialStatus `json:"status"` +} + +// CloudCredentialsMode is the specified mode the cloud-credential-operator +// should reconcile CredentialsRequest with +// +kubebuilder:validation:Enum="";Manual;Mint;Passthrough +type CloudCredentialsMode string + +const ( + // CloudCredentialsModeManual tells cloud-credential-operator to not reconcile any CredentialsRequests + // (primarily used for the disconnected VPC use-cases). + CloudCredentialsModeManual CloudCredentialsMode = "Manual" + + // CloudCredentialsModeMint tells cloud-credential-operator to reconcile all CredentialsRequests + // by minting new users/credentials. + CloudCredentialsModeMint CloudCredentialsMode = "Mint" + + // CloudCredentialsModePassthrough tells cloud-credential-operator to reconcile all CredentialsRequests + // by copying the cloud-specific secret data. + CloudCredentialsModePassthrough CloudCredentialsMode = "Passthrough" + + // CloudCredentialsModeDefault puts CCO into the default mode of operation (per-cloud/platform defaults): + // AWS/Azure/GCP: dynamically determine cluster's cloud credential capabilities to affect + // processing of CredentialsRequests + // All other clouds/platforms (OpenStack, oVirt, vSphere, etc): run in "passthrough" mode + CloudCredentialsModeDefault CloudCredentialsMode = "" +) + +// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. 
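The modes above combine with the shared OperatorSpec into a small configuration object. A minimal sketch constructing one in Mint mode follows; the object name "cluster" reflects the usual singleton convention and is assumed here, not mandated by the type itself.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cc := operatorv1.CloudCredential{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // conventional singleton name
		Spec: operatorv1.CloudCredentialSpec{
			OperatorSpec: operatorv1.OperatorSpec{ManagementState: operatorv1.Managed},
			// Mint: reconcile CredentialsRequests by minting new users/credentials.
			CredentialsMode: operatorv1.CloudCredentialsModeMint,
		},
	}
	fmt.Println(cc.Spec.CredentialsMode) // Mint
}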
+type CloudCredentialSpec struct { + OperatorSpec `json:",inline"` + // CredentialsMode allows informing CCO that it should not attempt to dynamically + // determine the root cloud credentials capabilities, and it should just run in + // the specified mode. + // It also allows putting the operator into "manual" mode if desired. + // Leaving the field in default mode runs CCO so that the cluster's cloud credentials + // will be dynamically probed for capabilities (on supported clouds/platforms). + // Supported modes: + // AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual" + // Others: Do not set value as other platforms only support running in "Passthrough" + // +optional + CredentialsMode CloudCredentialsMode `json:"credentialsMode,omitempty"` +} + +// CloudCredentialStatus defines the observed status of the cloud-credential-operator. +type CloudCredentialStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type CloudCredentialList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []CloudCredential `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_config.go b/vendor/github.com/openshift/api/operator/v1/types_config.go new file mode 100644 index 0000000000..e073269ffa --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_config.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Config provides information to configure the config operator. It handles installation, migration or synchronization of cloud based cluster configurations like AWS or Azure. +type Config struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Config Operator. + // +kubebuilder:validation:Required + // +required + Spec ConfigSpec `json:"spec"` + + // status defines the observed status of the Config Operator. + // +optional + Status ConfigStatus `json:"status"` +} + +type ConfigSpec struct { + OperatorSpec `json:",inline"` +} + +type ConfigStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigList is a collection of items +type ConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []Config `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go new file mode 100644 index 0000000000..2f6443df70 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -0,0 +1,228 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Console provides a means to configure an operator to manage the console. 
+type Console struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec ConsoleSpec `json:"spec,omitempty"` + // +optional + Status ConsoleStatus `json:"status,omitempty"` +} + +// ConsoleSpec is the specification of the desired behavior of the Console. +type ConsoleSpec struct { + OperatorSpec `json:",inline"` + // customization is used to optionally provide a small set of + // customization options to the web console. + // +optional + Customization ConsoleCustomization `json:"customization"` + // providers contains configuration for using specific service providers. + Providers ConsoleProviders `json:"providers"` + // route contains hostname and secret reference that contains the serving certificate. + // If a custom route is specified, a new route will be created with the + // provided hostname, under which console will be available. + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // If the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary. + // The default console route will be maintained to reserve the default hostname + // for console if the custom route is removed. + // If not specified, default route will be used. + // DEPRECATED + // +optional + Route ConsoleConfigRoute `json:"route"` + // plugins defines a list of enabled console plugin names. + // +optional + Plugins []string `json:"plugins,omitempty"` +} + +// ConsoleConfigRoute holds information on external route access to console. +// DEPRECATED +type ConsoleConfigRoute struct { + // hostname is the desired custom domain under which console will be available. + Hostname string `json:"hostname"` + // secret points to secret in the openshift-config namespace that contains custom + // certificate and key and needs to be created manually by the cluster admin. + // The referenced Secret is required to contain the following key value pairs: + // - "tls.crt" - specifies the custom certificate + // - "tls.key" - specifies the private key of the custom certificate + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + Secret configv1.SecretNameReference `json:"secret"` +} + +// ConsoleStatus defines the observed status of the Console. +type ConsoleStatus struct { + OperatorStatus `json:",inline"` +} + +// ConsoleProviders defines a list of optional additional providers of +// functionality to the console. +type ConsoleProviders struct { + // statuspage contains the ID for the statuspage.io page that provides status info. + // +optional + Statuspage *StatuspageProvider `json:"statuspage,omitempty"` +} + +// StatuspageProvider provides identity for statuspage account. +type StatuspageProvider struct { + // pageID is the unique ID assigned by Statuspage for your page. This must be a public page. + PageID string `json:"pageID"` +} + +// ConsoleCustomization defines a list of optional configuration for the console UI. +type ConsoleCustomization struct { + // brand is the default branding of the web console which can be overridden by + // providing the brand field. There is a limited set of specific brand options. + // This field controls elements of the console such as the logo. + // An invalid value will prevent a console rollout. 
+ Brand Brand `json:"brand,omitempty"` + // documentationBaseURL links to external documentation that is shown in various sections + // of the web console. Providing documentationBaseURL will override the default + // documentation URL. + // An invalid value will prevent a console rollout. + // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$` + DocumentationBaseURL string `json:"documentationBaseURL,omitempty"` + // customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog + // instead of the normal OpenShift product name. + // +optional + CustomProductName string `json:"customProductName,omitempty"` + // customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a + // ConfigMap in the openshift-config namespace. This can be created with a command like + // 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. + // Image size must be less than 1 MB due to constraints on the ConfigMap size. + // The ConfigMap key should include a file extension so that the console serves the file + // with the correct MIME type. + // Recommended logo specifications: + // Dimensions: Max height of 68px and max width of 200px + // SVG format preferred + // +optional + CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"` + // developerCatalog allows configuring the shown developer catalog categories. + // +kubebuilder:validation:Optional + // +optional + DeveloperCatalog DeveloperConsoleCatalogCustomization `json:"developerCatalog,omitempty"` + // projectAccess allows customizing the available list of ClusterRoles in the Developer perspective + // Project access page which can be used by a project admin to specify roles to other users and + // restrict access within the project. If set, the list will replace the default ClusterRole options. + // +kubebuilder:validation:Optional + // +optional + ProjectAccess ProjectAccess `json:"projectAccess,omitempty"` + // quickStarts allows customization of available ConsoleQuickStart resources in console. + // +kubebuilder:validation:Optional + // +optional + QuickStarts QuickStarts `json:"quickStarts,omitempty"` + // addPage allows customizing actions on the Add page in developer perspective. + // +kubebuilder:validation:Optional + // +optional + AddPage AddPage `json:"addPage,omitempty"` +} + +// ProjectAccess contains options for project access roles +type ProjectAccess struct { + // availableClusterRoles is the list of ClusterRole names that are assignable to users + // through the project access tab. + // +kubebuilder:validation:Optional + // +optional + AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"` +} + +// DeveloperConsoleCatalogCustomization allows a cluster admin to configure the developer catalog. +type DeveloperConsoleCatalogCustomization struct { + // categories which are shown in the developer catalog. + // +kubebuilder:validation:Optional + // +optional + Categories []DeveloperConsoleCatalogCategory `json:"categories,omitempty"` +} + +// DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. +type DeveloperConsoleCatalogCategoryMeta struct { + // ID is an identifier used in the URL to enable deep linking in console. + // ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. 
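+	// For example (illustrative): id "databases" with label "Databases" and a
+	// tag such as "postgresql" defines one category entry.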
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=32 + // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` + // +required + ID string `json:"id"` + // label defines a category display label. It is required and must have 1-64 characters. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +required + Label string `json:"label"` + // tags is a list of strings that will match the category. A selected category + // shows all items which have at least one overlapping tag between category and item. + // +kubebuilder:validation:Optional + // +optional + Tags []string `json:"tags,omitempty"` +} + +// DeveloperConsoleCatalogCategory for the developer console catalog. +type DeveloperConsoleCatalogCategory struct { + // defines top level category ID, label and filter tags. + DeveloperConsoleCatalogCategoryMeta `json:",inline"` + // subcategories defines a list of child categories. + // +kubebuilder:validation:Optional + // +optional + Subcategories []DeveloperConsoleCatalogCategoryMeta `json:"subcategories,omitempty"` +} + +// QuickStarts allow cluster admins to customize available ConsoleQuickStart resources. +type QuickStarts struct { + // disabled is a list of ConsoleQuickStart resource names that are not shown to users. + // +kubebuilder:validation:Optional + // +optional + Disabled []string `json:"disabled,omitempty"` +} + +// AddPage allows customizing actions on the Add page in developer perspective. +type AddPage struct { + // disabledActions is a list of actions that are not shown to users. + // Each action in the list is represented by its ID. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:MinItems=1 + // +optional + DisabledActions []string `json:"disabledActions,omitempty"` +} + +// Brand is a specific supported brand within the console. +// +kubebuilder:validation:Pattern=`^$|^(ocp|origin|okd|dedicated|online|azure)$` +type Brand string + +const ( + // Branding for OpenShift + BrandOpenShift Brand = "openshift" + // Branding for The Origin Community Distribution of Kubernetes + BrandOKD Brand = "okd" + // Branding for OpenShift Online + BrandOnline Brand = "online" + // Branding for OpenShift Container Platform + BrandOCP Brand = "ocp" + // Branding for OpenShift Dedicated + BrandDedicated Brand = "dedicated" + // Branding for Azure Red Hat OpenShift + BrandAzure Brand = "azure" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Console `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go new file mode 100644 index 0000000000..0e8add92d2 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -0,0 +1,72 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterCSIDriver is used to manage and configure a CSI driver installed by default +// in OpenShift. 
An example configuration may look like: +// apiVersion: operator.openshift.io/v1 +// kind: "ClusterCSIDriver" +// metadata: +// name: "ebs.csi.aws.com" +// spec: +// logLevel: Debug + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterCSIDriver object allows management and configuration of a CSI driver operator +// installed by default in OpenShift. Name of the object must be the name of the CSI driver +// it operates. See CSIDriverName type for list of allowed values. +type ClusterCSIDriver struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ClusterCSIDriverSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ClusterCSIDriverStatus `json:"status"` +} + +// CSIDriverName is the name of the CSI driver +type CSIDriverName string + +// If you are adding a new driver name here, ensure that the 0000_90_cluster_csi_driver_01_config.crd.yaml-patch file is also updated with the new driver name. +const ( + AWSEBSCSIDriver CSIDriverName = "ebs.csi.aws.com" + AWSEFSCSIDriver CSIDriverName = "efs.csi.aws.com" + AzureDiskCSIDriver CSIDriverName = "disk.csi.azure.com" + GCPPDCSIDriver CSIDriverName = "pd.csi.storage.gke.io" + CinderCSIDriver CSIDriverName = "cinder.csi.openstack.org" + VSphereCSIDriver CSIDriverName = "csi.vsphere.vmware.com" + ManilaCSIDriver CSIDriverName = "manila.csi.openstack.org" + OvirtCSIDriver CSIDriverName = "csi.ovirt.org" + KubevirtCSIDriver CSIDriverName = "csi.kubevirt.io" + SharedResourcesCSIDriver CSIDriverName = "csi.shared-resources.openshift.io" +) + +// ClusterCSIDriverSpec is the desired behavior of CSI driver operator +type ClusterCSIDriverSpec struct { + OperatorSpec `json:",inline"` +} + +// ClusterCSIDriverStatus is the observed status of CSI driver operator +type ClusterCSIDriverStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// ClusterCSIDriverList contains a list of ClusterCSIDriver +type ClusterCSIDriverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterCSIDriver `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go new file mode 100644 index 0000000000..5b6c06aaff --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go @@ -0,0 +1,44 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name. +type CSISnapshotController struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec CSISnapshotControllerSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. 
+ // +optional + Status CSISnapshotControllerStatus `json:"status"` +} + +// CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator. +type CSISnapshotControllerSpec struct { + OperatorSpec `json:",inline"` +} + +// CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator. +type CSISnapshotControllerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// CSISnapshotControllerList contains a list of CSISnapshotControllers. +type CSISnapshotControllerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CSISnapshotController `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go new file mode 100644 index 0000000000..0f73dc373b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -0,0 +1,183 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + corev1 "k8s.io/api/core/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=dnses,scope=Cluster +// +kubebuilder:subresource:status + +// DNS manages the CoreDNS component to provide a name resolution service +// for pods and services in the cluster. +// +// This supports the DNS-based service discovery specification: +// https://github.com/kubernetes/dns/blob/master/docs/specification.md +// +// More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns +type DNS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the DNS. + Spec DNSSpec `json:"spec,omitempty"` + // status is the most recently observed status of the DNS. + Status DNSStatus `json:"status,omitempty"` +} + +// DNSSpec is the specification of the desired behavior of the DNS. +type DNSSpec struct { + // servers is a list of DNS resolvers that provide name query delegation for one or + // more subdomains outside the scope of the cluster domain. If servers consists of + // more than one Server, longest suffix match will be used to determine the Server. + // + // For example, if there are two Servers, one for "foo.com" and another for "a.foo.com", + // and the name query is for "www.a.foo.com", it will be routed to the Server with Zone + // "a.foo.com". + // + // If this field is nil, no servers are created. + // + // +optional + Servers []Server `json:"servers,omitempty"` + + // nodePlacement provides explicit control over the scheduling of DNS + // pods. + // + // Generally, it is useful to run a DNS pod on every node so that DNS + // queries are always handled by a local DNS pod instead of going over + // the network to a DNS pod on another node. However, security policies + // may require restricting the placement of DNS pods to specific nodes. + // For example, if a security policy prohibits pods on arbitrary nodes + // from communicating with the API, a node selector can be specified to + // restrict DNS pods to nodes that are permitted to communicate with the + // API. Conversely, if running DNS pods on nodes with a particular + // taint is desired, a toleration can be specified for that taint. + // + // If unset, defaults are used. 
See nodePlacement for more details. + // + // +optional + NodePlacement DNSNodePlacement `json:"nodePlacement,omitempty"` +} + +// Server defines the schema for a server that runs per instance of CoreDNS. +type Server struct { + // name is required and specifies a unique name for the server. Name must comply + // with the Service Name Syntax of rfc6335. + Name string `json:"name"` + // zones is required and specifies the subdomains that Server is authoritative for. + // Zones must conform to the rfc1123 definition of a subdomain. Specifying the + // cluster domain (i.e., "cluster.local") is invalid. + Zones []string `json:"zones"` + // forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages + // to upstream resolvers. + ForwardPlugin ForwardPlugin `json:"forwardPlugin"` +} + +// ForwardPlugin defines a schema for configuring the CoreDNS forward plugin. +type ForwardPlugin struct { + // upstreams is a list of resolvers to forward name queries for subdomains of Zones. + // Upstreams are randomized when more than 1 upstream is specified. Each instance of + // CoreDNS performs health checking of Upstreams. When a healthy upstream returns an + // error during the exchange, another resolver is tried from Upstreams. Each upstream + // is represented by an IP address or IP:port if the upstream listens on a port other + // than 53. + // + // A maximum of 15 upstreams is allowed per ForwardPlugin. + // + // +kubebuilder:validation:MaxItems=15 + Upstreams []string `json:"upstreams"` +} + +// DNSNodePlacement describes the node scheduling configuration for DNS pods. +type DNSNodePlacement struct { + // nodeSelector is the node selector applied to DNS pods. + // + // If empty, the default is used, which is currently the following: + // + // kubernetes.io/os: linux + // + // This default is subject to change. + // + // If set, the specified selector is used and replaces the default. + // + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // tolerations is a list of tolerations applied to DNS pods. + // + // If empty, the DNS operator sets a toleration for the + // "node-role.kubernetes.io/master" taint. This default is subject to + // change. Specifying tolerations without including a toleration for + // the "node-role.kubernetes.io/master" taint may be risky as it could + // lead to an outage if all worker nodes become unavailable. + // + // Note that the daemon controller adds some tolerations as well. See + // https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + // + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +const ( + // Available indicates the DNS controller daemonset is available. + DNSAvailable = "Available" +) + +// DNSStatus defines the observed status of the DNS. +type DNSStatus struct { + // clusterIP is the service IP through which this DNS is made available. + // + // In the case of the default DNS, this will be a well known IP that is used + // as the default nameserver for pods that are using the default ClusterFirst DNS policy. + // + // In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list + // or used explicitly when performing name resolution from within the cluster. 
+	// Example: dig foo.com @<service IP>
+	//
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	//
+	// +kubebuilder:validation:Required
+	// +required
+	ClusterIP string `json:"clusterIP"`
+
+	// clusterDomain is the local cluster DNS domain suffix for DNS services.
+	// This will be a subdomain as defined in RFC 1034,
+	// section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5
+	// Example: "cluster.local"
+	//
+	// More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service
+	//
+	// +kubebuilder:validation:Required
+	// +required
+	ClusterDomain string `json:"clusterDomain"`
+
+	// conditions provide information about the state of the DNS on the cluster.
+	//
+	// These are the supported DNS conditions:
+	//
+	//   * Available
+	//   - True if the following conditions are met:
+	//     * DNS controller daemonset is available.
+	//   - False if any of those conditions are unsatisfied.
+	//
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +optional
+	Conditions []OperatorCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+
+// DNSList contains a list of DNS
+type DNSList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []DNS `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
new file mode 100644
index 0000000000..106c92b813
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
@@ -0,0 +1,40 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Etcd provides information to configure an operator to manage etcd.
+type Etcd struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// +kubebuilder:validation:Required
+	// +required
+	Spec EtcdSpec `json:"spec"`
+	// +optional
+	Status EtcdStatus `json:"status"`
+}
+
+type EtcdSpec struct {
+	StaticPodOperatorSpec `json:",inline"`
+}
+
+type EtcdStatus struct {
+	StaticPodOperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EtcdList is a collection of items
+type EtcdList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	// Items contains the items
+	Items []Etcd `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go
new file mode 100644
index 0000000000..126b53cf0d
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go
@@ -0,0 +1,1170 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	corev1 "k8s.io/api/core/v1"
+
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.selector
+
+// IngressController describes a managed ingress controller for the cluster. The
+// controller can service OpenShift Route and Kubernetes Ingress resources.
+//
+// When an IngressController is created, a new ingress controller deployment is
+// created to allow external traffic to reach the services that expose Ingress
+// or Route resources. Updating this resource may lead to disruption for public
+// facing network connections as a new ingress controller revision may be rolled
+// out.
+//
+// https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
+//
+// Whenever possible, sensible defaults for the platform are used. See each
+// field for more details.
+type IngressController struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec is the specification of the desired behavior of the IngressController.
+	Spec IngressControllerSpec `json:"spec,omitempty"`
+	// status is the most recently observed status of the IngressController.
+	Status IngressControllerStatus `json:"status,omitempty"`
+}
+
+// IngressControllerSpec is the specification of the desired behavior of the
+// IngressController.
+type IngressControllerSpec struct {
+	// domain is a DNS name serviced by the ingress controller and is used to
+	// configure multiple features:
+	//
+	// * For the LoadBalancerService endpoint publishing strategy, domain is
+	//   used to configure DNS records. See endpointPublishingStrategy.
+	//
+	// * When using a generated default certificate, the certificate will be valid
+	//   for domain and its subdomains. See defaultCertificate.
+	//
+	// * The value is published to individual Route statuses so that end-users
+	//   know where to target external DNS records.
+	//
+	// domain must be unique among all IngressControllers, and cannot be
+	// updated.
+	//
+	// If empty, defaults to ingress.config.openshift.io/cluster .spec.domain.
+	//
+	// +optional
+	Domain string `json:"domain,omitempty"`
+
+	// httpErrorCodePages specifies a configmap with custom error pages.
+	// The administrator must create this configmap in the openshift-config namespace.
+	// This configmap should have keys in the format "error-page-<error code>.http",
+	// where <error code> is an HTTP error code.
+	// For example, "error-page-503.http" defines an error page for HTTP 503 responses.
+	// Currently only error pages for 503 and 404 responses can be customized.
+	// Each value in the configmap should be the full response, including HTTP headers.
+	// E.g.: https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http
+	// If this field is empty, the ingress controller uses the default error pages.
+	HttpErrorCodePages configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"`
+
+	// replicas is the desired number of ingress controller replicas. If unset,
+	// defaults to 2.
+	//
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty"`
+
+	// endpointPublishingStrategy is used to publish the ingress controller
+	// endpoints to other networks, enable load balancer integrations, etc.
+	//
+	// If unset, the default is based on
+	// infrastructure.config.openshift.io/cluster .status.platform:
+	//
+	//   AWS:      LoadBalancerService (with External scope)
+	//   Azure:    LoadBalancerService (with External scope)
+	//   GCP:      LoadBalancerService (with External scope)
+	//   IBMCloud: LoadBalancerService (with External scope)
+	//   Libvirt:  HostNetwork
+	//
+	// Any other platform types (including None) default to HostNetwork.
+	//
+	// endpointPublishingStrategy cannot be updated.
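+	//
+	// For instance, a minimal sketch of an internally-scoped load balancer
+	// configuration (illustrative, not a platform default) might be:
+	//
+	//   endpointPublishingStrategy:
+	//     type: LoadBalancerService
+	//     loadBalancer:
+	//       scope: Internal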
+ // + // +optional + EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` + + // defaultCertificate is a reference to a secret containing the default + // certificate served by the ingress controller. When Routes don't specify + // their own certificate, defaultCertificate is used. + // + // The secret must contain the following keys and data: + // + // tls.crt: certificate file contents + // tls.key: key file contents + // + // If unset, a wildcard certificate is automatically generated and used. The + // certificate is valid for the ingress controller domain (and subdomains) and + // the generated certificate's CA will be automatically integrated with the + // cluster's trust store. + // + // If a wildcard certificate is used and shared by multiple + // HTTP/2 enabled routes (which implies ALPN) then clients + // (i.e., notably browsers) are at liberty to reuse open + // connections. This means a client can reuse a connection to + // another route and that is likely to fail. This behaviour is + // generally known as connection coalescing. + // + // The in-use certificate (whether generated or user-specified) will be + // automatically integrated with OpenShift's built-in OAuth server. + // + // +optional + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + + // namespaceSelector is used to filter the set of namespaces serviced by the + // ingress controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // routeSelector is used to filter the set of Routes serviced by the ingress + // controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` + + // nodePlacement enables explicit control over the scheduling of the ingress + // controller. + // + // If unset, defaults are used. See NodePlacement for more details. + // + // +optional + NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` + + // tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. + // + // If unset, the default is based on the apiservers.config.openshift.io/cluster resource. + // + // Note that when using the Old, Intermediate, and Modern profile types, the effective + // profile configuration is subject to change between releases. For example, given + // a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade + // to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress + // controller, resulting in a rollout. + // + // Note that the minimum TLS version for ingress controllers is 1.1, and + // the maximum TLS version is 1.2. An implication of this restriction + // is that the Modern TLS profile type cannot be used because it + // requires TLS 1.3. + // + // +optional + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + + // routeAdmission defines a policy for handling new route claims (for example, + // to allow or deny claims across namespaces). + // + // If empty, defaults will be applied. See specific routeAdmission fields + // for details about their defaults. + // + // +optional + RouteAdmission *RouteAdmissionPolicy `json:"routeAdmission,omitempty"` + + // logging defines parameters for what should be logged where. 
If this + // field is empty, operational logs are enabled but access logs are + // disabled. + // + // +optional + Logging *IngressControllerLogging `json:"logging,omitempty"` + + // httpHeaders defines policy for HTTP headers. + // + // If this field is empty, the default values are used. + // + // +optional + HTTPHeaders *IngressControllerHTTPHeaders `json:"httpHeaders,omitempty"` + + // tuningOptions defines parameters for adjusting the performance of + // ingress controller pods. All fields are optional and will use their + // respective defaults if not set. See specific tuningOptions fields for + // more details. + // + // Setting fields within tuningOptions is generally not recommended. The + // default values are suitable for most configurations. + // + // +optional + TuningOptions IngressControllerTuningOptions `json:"tuningOptions,omitempty"` + + // unsupportedConfigOverrides allows specifying unsupported + // configuration options. Its use is unsupported. + // + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` +} + +// NodePlacement describes node scheduling configuration for an ingress +// controller. +type NodePlacement struct { + // nodeSelector is the node selector applied to ingress controller + // deployments. + // + // If unset, the default is: + // + // kubernetes.io/os: linux + // node-role.kubernetes.io/worker: '' + // + // If set, the specified selector is used and replaces the default. + // + // +optional + NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` + + // tolerations is a list of tolerations applied to ingress controller + // deployments. + // + // The default is an empty list. + // + // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + // + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// EndpointPublishingStrategyType is a way to publish ingress controller endpoints. +// +kubebuilder:validation:Enum=LoadBalancerService;HostNetwork;Private;NodePortService +type EndpointPublishingStrategyType string + +const ( + // LoadBalancerService publishes the ingress controller using a Kubernetes + // LoadBalancer Service. + LoadBalancerServiceStrategyType EndpointPublishingStrategyType = "LoadBalancerService" + + // HostNetwork publishes the ingress controller on node ports where the + // ingress controller is deployed. + HostNetworkStrategyType EndpointPublishingStrategyType = "HostNetwork" + + // Private does not publish the ingress controller. + PrivateStrategyType EndpointPublishingStrategyType = "Private" + + // NodePortService publishes the ingress controller using a Kubernetes NodePort Service. + NodePortServiceStrategyType EndpointPublishingStrategyType = "NodePortService" +) + +// LoadBalancerScope is the scope at which a load balancer is exposed. +// +kubebuilder:validation:Enum=Internal;External +type LoadBalancerScope string + +var ( + // InternalLoadBalancer is a load balancer that is exposed only on the + // cluster's private network. + InternalLoadBalancer LoadBalancerScope = "Internal" + + // ExternalLoadBalancer is a load balancer that is exposed on the + // cluster's public network (which is typically on the Internet). + ExternalLoadBalancer LoadBalancerScope = "External" +) + +// LoadBalancerStrategy holds parameters for a load balancer. +type LoadBalancerStrategy struct { + // scope indicates the scope at which the load balancer is exposed. 
+ // Possible values are "External" and "Internal". + // + // +kubebuilder:validation:Required + // +required + Scope LoadBalancerScope `json:"scope"` + + // providerParameters holds desired load balancer information specific to + // the underlying infrastructure provider. + // + // If empty, defaults will be applied. See specific providerParameters + // fields for details about their defaults. + // + // +optional + ProviderParameters *ProviderLoadBalancerParameters `json:"providerParameters,omitempty"` +} + +// ProviderLoadBalancerParameters holds desired load balancer information +// specific to the underlying infrastructure provider. +// +union +type ProviderLoadBalancerParameters struct { + // type is the underlying infrastructure provider for the load balancer. + // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "OpenStack", + // and "VSphere". + // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + Type LoadBalancerProviderType `json:"type"` + + // aws provides configuration settings that are specific to AWS + // load balancers. + // + // If empty, defaults will be applied. See specific aws fields for + // details about their defaults. + // + // +optional + AWS *AWSLoadBalancerParameters `json:"aws,omitempty"` + + // gcp provides configuration settings that are specific to GCP + // load balancers. + // + // If empty, defaults will be applied. See specific gcp fields for + // details about their defaults. + // + // +optional + GCP *GCPLoadBalancerParameters `json:"gcp,omitempty"` +} + +// LoadBalancerProviderType is the underlying infrastructure provider for the +// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", +// "OpenStack", and "VSphere". +// +// +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;OpenStack;VSphere;IBM +type LoadBalancerProviderType string + +const ( + AWSLoadBalancerProvider LoadBalancerProviderType = "AWS" + AzureLoadBalancerProvider LoadBalancerProviderType = "Azure" + GCPLoadBalancerProvider LoadBalancerProviderType = "GCP" + OpenStackLoadBalancerProvider LoadBalancerProviderType = "OpenStack" + VSphereLoadBalancerProvider LoadBalancerProviderType = "VSphere" + IBMLoadBalancerProvider LoadBalancerProviderType = "IBM" + BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal" +) + +// AWSLoadBalancerParameters provides configuration settings that are +// specific to AWS load balancers. +// +union +type AWSLoadBalancerParameters struct { + // type is the type of AWS load balancer to instantiate for an ingresscontroller. + // + // Valid values are: + // + // * "Classic": A Classic Load Balancer that makes routing decisions at either + // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See + // the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + // + // * "NLB": A Network Load Balancer that makes routing decisions at the + // transport layer (TCP/SSL). See the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb + // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + Type AWSLoadBalancerType `json:"type"` + + // classicLoadBalancerParameters holds configuration parameters for an AWS + // classic load balancer. Present only if type is Classic. 
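+	//
+	// As an illustrative sketch, selecting an AWS Network Load Balancer for
+	// an ingress controller combines the surrounding fields roughly as:
+	//
+	//   endpointPublishingStrategy:
+	//     type: LoadBalancerService
+	//     loadBalancer:
+	//       scope: External
+	//       providerParameters:
+	//         type: AWS
+	//         aws:
+	//           type: NLB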
+ // + // +optional + ClassicLoadBalancerParameters *AWSClassicLoadBalancerParameters `json:"classicLoadBalancer,omitempty"` + + // networkLoadBalancerParameters holds configuration parameters for an AWS + // network load balancer. Present only if type is NLB. + // + // +optional + NetworkLoadBalancerParameters *AWSNetworkLoadBalancerParameters `json:"networkLoadBalancer,omitempty"` +} + +// AWSLoadBalancerType is the type of AWS load balancer to instantiate. +// +kubebuilder:validation:Enum=Classic;NLB +type AWSLoadBalancerType string + +const ( + AWSClassicLoadBalancer AWSLoadBalancerType = "Classic" + AWSNetworkLoadBalancer AWSLoadBalancerType = "NLB" +) + +// GCPLoadBalancerParameters provides configuration settings that are +// specific to GCP load balancers. +type GCPLoadBalancerParameters struct { + // clientAccess describes how client access is restricted for internal + // load balancers. + // + // Valid values are: + // * "Global": Specifying an internal load balancer with Global client access + // allows clients from any region within the VPC to communicate with the load + // balancer. + // + // https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + // + // * "Local": Specifying an internal load balancer with Local client access + // means only clients within the same region (and VPC) as the GCP load balancer + // can communicate with the load balancer. Note that this is the default behavior. + // + // https://cloud.google.com/load-balancing/docs/internal#client_access + // + // +optional + ClientAccess GCPClientAccess `json:"clientAccess,omitempty"` +} + +// GCPClientAccess describes how client access is restricted for internal +// load balancers. +// +kubebuilder:validation:Enum=Global;Local +type GCPClientAccess string + +const ( + GCPGlobalAccess GCPClientAccess = "Global" + GCPLocalAccess GCPClientAccess = "Local" +) + +// AWSClassicLoadBalancerParameters holds configuration parameters for an +// AWS Classic load balancer. +type AWSClassicLoadBalancerParameters struct { +} + +// AWSNetworkLoadBalancerParameters holds configuration parameters for an +// AWS Network load balancer. +type AWSNetworkLoadBalancerParameters struct { +} + +// HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing +// strategy. +type HostNetworkStrategy struct { + // protocol specifies whether the IngressController expects incoming + // connections to use plain TCP or whether the IngressController expects + // PROXY protocol. + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // The following values are valid for this field: + // + // * The empty string. + // * "TCP". + // * "PROXY". + // + // The empty string specifies the default, which is TCP without PROXY + // protocol. Note that the default is subject to change. 
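+	//
+	// For example (a sketch; it assumes the fronting load balancer itself
+	// speaks PROXY protocol), an IngressController spec might set:
+	//
+	//   endpointPublishingStrategy:
+	//     type: HostNetwork
+	//     hostNetwork:
+	//       protocol: PROXY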
+ // + // +kubebuilder:validation:Optional + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` +} + +// PrivateStrategy holds parameters for the Private endpoint publishing +// strategy. +type PrivateStrategy struct { +} + +// NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy. +type NodePortStrategy struct { + // protocol specifies whether the IngressController expects incoming + // connections to use plain TCP or whether the IngressController expects + // PROXY protocol. + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // The following values are valid for this field: + // + // * The empty string. + // * "TCP". + // * "PROXY". + // + // The empty string specifies the default, which is TCP without PROXY + // protocol. Note that the default is subject to change. + // + // +kubebuilder:validation:Optional + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` +} + +// IngressControllerProtocol specifies whether PROXY protocol is enabled or not. +// +kubebuilder:validation:Enum="";TCP;PROXY +type IngressControllerProtocol string + +const ( + DefaultProtocol IngressControllerProtocol = "" + TCPProtocol IngressControllerProtocol = "TCP" + ProxyProtocol IngressControllerProtocol = "PROXY" +) + +// EndpointPublishingStrategy is a way to publish the endpoints of an +// IngressController, and represents the type and any additional configuration +// for a specific type. +// +union +type EndpointPublishingStrategy struct { + // type is the publishing strategy to use. Valid values are: + // + // * LoadBalancerService + // + // Publishes the ingress controller using a Kubernetes LoadBalancer Service. + // + // In this configuration, the ingress controller deployment uses container + // networking. A LoadBalancer Service is created to publish the deployment. + // + // See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + // + // If domain is set, a wildcard DNS record will be managed to point at the + // LoadBalancer Service's external name. DNS records are managed only in DNS + // zones defined by dns.config.openshift.io/cluster .spec.publicZone and + // .spec.privateZone. + // + // Wildcard DNS management is currently supported only on the AWS, Azure, + // and GCP platforms. + // + // * HostNetwork + // + // Publishes the ingress controller on node ports where the ingress controller + // is deployed. + // + // In this configuration, the ingress controller deployment uses host + // networking, bound to node ports 80 and 443. The user is responsible for + // configuring an external load balancer to publish the ingress controller via + // the node ports. + // + // * Private + // + // Does not publish the ingress controller. 
+	//
+	// In this configuration, the ingress controller deployment uses container
+	// networking, and is not explicitly published. The user must manually publish
+	// the ingress controller.
+	//
+	// * NodePortService
+	//
+	// Publishes the ingress controller using a Kubernetes NodePort Service.
+	//
+	// In this configuration, the ingress controller deployment uses container
+	// networking. A NodePort Service is created to publish the deployment. The
+	// specific node ports are dynamically allocated by OpenShift; however, to
+	// support static port allocations, user changes to the node port
+	// field of the managed NodePort Service will be preserved.
+	//
+	// +unionDiscriminator
+	// +kubebuilder:validation:Required
+	// +required
+	Type EndpointPublishingStrategyType `json:"type"`
+
+	// loadBalancer holds parameters for the load balancer. Present only if
+	// type is LoadBalancerService.
+	// +optional
+	LoadBalancer *LoadBalancerStrategy `json:"loadBalancer,omitempty"`
+
+	// hostNetwork holds parameters for the HostNetwork endpoint publishing
+	// strategy. Present only if type is HostNetwork.
+	// +optional
+	HostNetwork *HostNetworkStrategy `json:"hostNetwork,omitempty"`
+
+	// private holds parameters for the Private endpoint publishing
+	// strategy. Present only if type is Private.
+	// +optional
+	Private *PrivateStrategy `json:"private,omitempty"`
+
+	// nodePort holds parameters for the NodePortService endpoint publishing strategy.
+	// Present only if type is NodePortService.
+	// +optional
+	NodePort *NodePortStrategy `json:"nodePort,omitempty"`
+}
+
+// RouteAdmissionPolicy is an admission policy for allowing new route claims.
+type RouteAdmissionPolicy struct {
+	// namespaceOwnership describes how host name claims across namespaces should
+	// be handled.
+	//
+	// Value must be one of:
+	//
+	// - Strict: Do not allow routes in different namespaces to claim the same host.
+	//
+	// - InterNamespaceAllowed: Allow routes to claim different paths of the same
+	//   host name across namespaces.
+	//
+	// If empty, the default is Strict.
+	// +optional
+	NamespaceOwnership NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"`
+	// wildcardPolicy describes how routes with wildcard policies should
+	// be handled for the ingress controller. WildcardPolicy controls use
+	// of routes [1] exposed by the ingress controller based on the route's
+	// wildcard policy.
+	//
+	// [1] https://github.com/openshift/api/blob/master/route/v1/types.go
+	//
+	// Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed
+	// will cause admitted routes with a wildcard policy of Subdomain to stop
+	// working. These routes must be updated to a wildcard policy of None to be
+	// readmitted by the ingress controller.
+	//
+	// WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values.
+	//
+	// If empty, defaults to "WildcardsDisallowed".
+	//
+	WildcardPolicy WildcardPolicy `json:"wildcardPolicy,omitempty"`
+}
+
+// WildcardPolicy is a route admission policy component that describes how
+// routes with a wildcard policy should be handled.
+// +kubebuilder:validation:Enum=WildcardsAllowed;WildcardsDisallowed
+type WildcardPolicy string
+
+const (
+	// WildcardPolicyAllowed indicates routes with any wildcard policy are
+	// admitted by the ingress controller.
+	WildcardPolicyAllowed WildcardPolicy = "WildcardsAllowed"
+
+	// WildcardPolicyDisallowed indicates only routes with a wildcard policy
+	// of None are admitted by the ingress controller.
+ WildcardPolicyDisallowed WildcardPolicy = "WildcardsDisallowed" +) + +// NamespaceOwnershipCheck is a route admission policy component that describes +// how host name claims across namespaces should be handled. +// +kubebuilder:validation:Enum=InterNamespaceAllowed;Strict +type NamespaceOwnershipCheck string + +const ( + // InterNamespaceAllowedOwnershipCheck allows routes to claim different paths of the same host name across namespaces. + InterNamespaceAllowedOwnershipCheck NamespaceOwnershipCheck = "InterNamespaceAllowed" + + // StrictNamespaceOwnershipCheck does not allow routes to claim the same host name across namespaces. + StrictNamespaceOwnershipCheck NamespaceOwnershipCheck = "Strict" +) + +// LoggingDestinationType is a type of destination to which to send log +// messages. +// +// +kubebuilder:validation:Enum=Container;Syslog +type LoggingDestinationType string + +const ( + // Container sends log messages to a sidecar container. + ContainerLoggingDestinationType LoggingDestinationType = "Container" + + // Syslog sends log messages to a syslog endpoint. + SyslogLoggingDestinationType LoggingDestinationType = "Syslog" + + // ContainerLoggingSidecarContainerName is the name of the container + // with the log output in an ingress controller pod when container + // logging is used. + ContainerLoggingSidecarContainerName = "logs" +) + +// SyslogLoggingDestinationParameters describes parameters for the Syslog +// logging destination type. +type SyslogLoggingDestinationParameters struct { + // address is the IP address of the syslog endpoint that receives log + // messages. + // + // +kubebuilder:validation:Required + // +required + Address string `json:"address"` + + // port is the UDP port number of the syslog endpoint that receives log + // messages. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +required + Port uint32 `json:"port"` + + // facility specifies the syslog facility of log messages. + // + // If this field is empty, the facility is "local1". + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7 + // +optional + Facility string `json:"facility,omitempty"` +} + +// ContainerLoggingDestinationParameters describes parameters for the Container +// logging destination type. +type ContainerLoggingDestinationParameters struct { +} + +// LoggingDestination describes a destination for log messages. +// +union +type LoggingDestination struct { + // type is the type of destination for logs. It must be one of the + // following: + // + // * Container + // + // The ingress operator configures the sidecar container named "logs" on + // the ingress controller pod and configures the ingress controller to + // write logs to the sidecar. The logs are then available as container + // logs. The expectation is that the administrator configures a custom + // logging solution that reads logs from this sidecar. Note that using + // container logs means that logs may be dropped if the rate of logs + // exceeds the container runtime's or the custom logging solution's + // capacity. + // + // * Syslog + // + // Logs are sent to a syslog endpoint. The administrator must specify + // an endpoint that can receive syslog messages. The expectation is + // that the administrator has configured a custom syslog instance. 
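+	//
+	// A hypothetical access-logging configuration that sends logs to a
+	// syslog endpoint (the address and port below are illustrative) might
+	// look like, in an IngressController spec:
+	//
+	//   logging:
+	//     access:
+	//       destination:
+	//         type: Syslog
+	//         syslog:
+	//           address: 192.0.2.10
+	//           port: 514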
+ // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + Type LoggingDestinationType `json:"type"` + + // syslog holds parameters for a syslog endpoint. Present only if + // type is Syslog. + // + // +optional + Syslog *SyslogLoggingDestinationParameters `json:"syslog,omitempty"` + + // container holds parameters for the Container logging destination. + // Present only if type is Container. + // + // +optional + Container *ContainerLoggingDestinationParameters `json:"container,omitempty"` +} + +// IngressControllerCaptureHTTPHeader describes an HTTP header that should be +// captured. +type IngressControllerCaptureHTTPHeader struct { + // name specifies a header name. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +required + Name string `json:"name"` + + // maxLength specifies a maximum length for the header value. If a + // header value exceeds this length, the value will be truncated in the + // log message. Note that the ingress controller may impose a separate + // bound on the total length of HTTP headers in a request. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +required + MaxLength int `json:"maxLength"` +} + +// IngressControllerCaptureHTTPHeaders specifies which HTTP headers the +// IngressController captures. +type IngressControllerCaptureHTTPHeaders struct { + // request specifies which HTTP request headers to capture. + // + // If this field is empty, no request headers are captured. + // + // +nullable + // +optional + Request []IngressControllerCaptureHTTPHeader `json:"request,omitempty"` + + // response specifies which HTTP response headers to capture. + // + // If this field is empty, no response headers are captured. + // + // +nullable + // +optional + Response []IngressControllerCaptureHTTPHeader `json:"response,omitempty"` +} + +// CookieMatchType indicates the type of matching used against cookie names to +// select a cookie for capture. +// +kubebuilder:validation:Enum=Exact;Prefix +type CookieMatchType string + +const ( + // CookieMatchTypeExact indicates that an exact string match should be + // performed. + CookieMatchTypeExact CookieMatchType = "Exact" + // CookieMatchTypePrefix indicates that a string prefix match should be + // performed. + CookieMatchTypePrefix CookieMatchType = "Prefix" +) + +// IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be +// captured. +type IngressControllerCaptureHTTPCookie struct { + IngressControllerCaptureHTTPCookieUnion `json:",inline"` + + // maxLength specifies a maximum length of the string that will be + // logged, which includes the cookie name, cookie value, and + // one-character delimiter. If the log entry exceeds this length, the + // value will be truncated in the log message. Note that the ingress + // controller may impose a separate bound on the total length of HTTP + // headers in a request. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=1024 + // +required + MaxLength int `json:"maxLength"` +} + +// IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured. +// +union +type IngressControllerCaptureHTTPCookieUnion struct { + // matchType specifies the type of match to be performed on the cookie + // name. 
Allowed values are "Exact" for an exact string match and + // "Prefix" for a string prefix match. If "Exact" is specified, a name + // must be specified in the name field. If "Prefix" is provided, a + // prefix must be specified in the namePrefix field. For example, + // specifying matchType "Prefix" and namePrefix "foo" will capture a + // cookie named "foo" or "foobar" but not one named "bar". The first + // matching cookie is captured. + // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + MatchType CookieMatchType `json:"matchType,omitempty"` + + // name specifies a cookie name. Its value must be a valid HTTP cookie + // name as defined in RFC 6265 section 4.1. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + // +optional + Name string `json:"name"` + + // namePrefix specifies a cookie name prefix. Its value must be a valid + // HTTP cookie name as defined in RFC 6265 section 4.1. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + // +optional + NamePrefix string `json:"namePrefix"` +} + +// AccessLogging describes how client requests should be logged. +type AccessLogging struct { + // destination is where access logs go. + // + // +kubebuilder:validation:Required + // +required + Destination LoggingDestination `json:"destination"` + + // httpLogFormat specifies the format of the log message for an HTTP + // request. + // + // If this field is empty, log messages use the implementation's default + // HTTP log format. For HAProxy's default HTTP log format, see the + // HAProxy documentation: + // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + // + // Note that this format only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). It does not affect the log format for TLS passthrough + // connections. + // + // +optional + HttpLogFormat string `json:"httpLogFormat,omitempty"` + + // httpCaptureHeaders defines HTTP headers that should be captured in + // access logs. If this field is empty, no headers are captured. + // + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be captured for TLS passthrough + // connections. + // + // +optional + HTTPCaptureHeaders IngressControllerCaptureHTTPHeaders `json:"httpCaptureHeaders,omitempty"` + + // httpCaptureCookies specifies HTTP cookies that should be captured in + // access logs. If this field is empty, no cookies are captured. + // + // +nullable + // +optional + // +kubebuilder:validation:MaxItems=1 + HTTPCaptureCookies []IngressControllerCaptureHTTPCookie `json:"httpCaptureCookies,omitempty"` +} + +// IngressControllerLogging describes what should be logged where. +type IngressControllerLogging struct { + // access describes how the client requests should be logged. + // + // If this field is empty, access logging is disabled. + // + // +optional + Access *AccessLogging `json:"access,omitempty"` +} + +// IngressControllerHTTPHeaderPolicy is a policy for setting HTTP headers. 
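+//
+// As an illustrative example, an IngressController that always replaces the
+// Forwarded and X-Forwarded-* headers would set, in its spec:
+//
+//   httpHeaders:
+//     forwardedHeaderPolicy: Replace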
+// +// +kubebuilder:validation:Enum=Append;Replace;IfNone;Never +type IngressControllerHTTPHeaderPolicy string + +const ( + // AppendHTTPHeaderPolicy appends the header, preserving any existing header. + AppendHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Append" + // ReplaceHTTPHeaderPolicy sets the header, removing any existing header. + ReplaceHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Replace" + // IfNoneHTTPHeaderPolicy sets the header if it is not already set. + IfNoneHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "IfNone" + // NeverHTTPHeaderPolicy never sets the header, preserving any existing + // header. + NeverHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Never" +) + +// IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a +// unique id header. +type IngressControllerHTTPUniqueIdHeaderPolicy struct { + // name specifies the name of the HTTP header (for example, "unique-id") + // that the ingress controller should inject into HTTP requests. The + // field's value must be a valid HTTP header name as defined in RFC 2616 + // section 4.2. If the field is empty, no header is injected. + // + // +optional + // +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + Name string `json:"name,omitempty"` + + // format specifies the format for the injected HTTP header's value. + // This field has no effect unless name is specified. For the + // HAProxy-based ingress controller implementation, this format uses the + // same syntax as the HTTP log format. If the field is empty, the + // default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the + // corresponding HAProxy documentation: + // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + // + // +optional + // +kubebuilder:validation:Pattern="^(%(%|(\\{[-+]?[QXE](,[-+]?[QXE])*\\})?([A-Za-z]+|\\[[.0-9A-Z_a-z]+(\\([^)]+\\))?(,[.0-9A-Z_a-z]+(\\([^)]+\\))?)*\\]))|[^%[:cntrl:]])*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + Format string `json:"format,omitempty"` +} + +// IngressControllerHTTPHeaderNameCaseAdjustment is the name of an HTTP header +// (for example, "X-Forwarded-For") in the desired capitalization. The value +// must be a valid HTTP header name as defined in RFC 2616 section 4.2. +// +// +optional +// +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=1024 +type IngressControllerHTTPHeaderNameCaseAdjustment string + +// IngressControllerHTTPHeaders specifies how the IngressController handles +// certain HTTP headers. +type IngressControllerHTTPHeaders struct { + // forwardedHeaderPolicy specifies when and how the IngressController + // sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, + // X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version + // HTTP headers. The value may be one of the following: + // + // * "Append", which specifies that the IngressController appends the + // headers, preserving existing headers. + // + // * "Replace", which specifies that the IngressController sets the + // headers, replacing any existing Forwarded or X-Forwarded-* headers. + // + // * "IfNone", which specifies that the IngressController sets the + // headers if they are not already set. + // + // * "Never", which specifies that the IngressController never sets the + // headers, preserving any existing headers. 
+ // + // By default, the policy is "Append". + // + // +optional + ForwardedHeaderPolicy IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"` + + // uniqueId describes configuration for a custom HTTP header that the + // ingress controller should inject into incoming HTTP requests. + // Typically, this header is configured to have a value that is unique + // to the HTTP request. The header can be used by applications or + // included in access logs to facilitate tracing individual HTTP + // requests. + // + // If this field is empty, no such header is injected into requests. + // + // +optional + UniqueId IngressControllerHTTPUniqueIdHeaderPolicy `json:"uniqueId,omitempty"` + + // headerNameCaseAdjustments specifies case adjustments that can be + // applied to HTTP header names. Each adjustment is specified as an + // HTTP header name with the desired capitalization. For example, + // specifying "X-Forwarded-For" indicates that the "x-forwarded-for" + // HTTP header should be adjusted to have the specified capitalization. + // + // These adjustments are only applied to cleartext, edge-terminated, and + // re-encrypt routes, and only when using HTTP/1. + // + // For request headers, these adjustments are applied only for routes + // that have the haproxy.router.openshift.io/h1-adjust-case=true + // annotation. For response headers, these adjustments are applied to + // all HTTP responses. + // + // If this field is empty, no request headers are adjusted. + // + // +nullable + // +optional + HeaderNameCaseAdjustments []IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` +} + +// IngressControllerTuningOptions specifies options for tuning the performance +// of ingress controller pods +type IngressControllerTuningOptions struct { + // headerBufferBytes describes how much memory should be reserved + // (in bytes) for IngressController connection sessions. + // Note that this value must be at least 16384 if HTTP/2 is + // enabled for the IngressController (https://tools.ietf.org/html/rfc7540). + // If this field is empty, the IngressController will use a default value + // of 32768 bytes. + // + // Setting this field is generally not recommended as headerBufferBytes + // values that are too small may break the IngressController and + // headerBufferBytes values that are too large could cause the + // IngressController to use significantly more memory than necessary. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=16384 + // +optional + HeaderBufferBytes int32 `json:"headerBufferBytes,omitempty"` + + // headerBufferMaxRewriteBytes describes how much memory should be reserved + // (in bytes) from headerBufferBytes for HTTP header rewriting + // and appending for IngressController connection sessions. + // Note that incoming HTTP requests will be limited to + // (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning + // headerBufferBytes must be greater than headerBufferMaxRewriteBytes. + // If this field is empty, the IngressController will use a default value + // of 8192 bytes. + // + // Setting this field is generally not recommended as + // headerBufferMaxRewriteBytes values that are too small may break the + // IngressController and headerBufferMaxRewriteBytes values that are too + // large could cause the IngressController to use significantly more memory + // than necessary. 
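+	//
+	// If tuning is nonetheless required, a hypothetical sketch (the values
+	// are illustrative and subject to the minimums noted above) might be:
+	//
+	//   tuningOptions:
+	//     headerBufferBytes: 65536
+	//     headerBufferMaxRewriteBytes: 16384
+	//     threadCount: 8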
+ // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=4096 + // +optional + HeaderBufferMaxRewriteBytes int32 `json:"headerBufferMaxRewriteBytes,omitempty"` + + // threadCount defines the number of threads created per HAProxy process. + // Creating more threads allows each ingress controller pod to handle more + // connections, at the cost of more system resources being used. HAProxy + // currently supports up to 64 threads. If this field is empty, the + // IngressController will use the default value. The current default is 4 + // threads, but this may change in future releases. + // + // Setting this field is generally not recommended. Increasing the number + // of HAProxy threads allows ingress controller pods to utilize more CPU + // time under load, potentially starving other pods if set too high. + // Reducing the number of threads may cause the ingress controller to + // perform poorly. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=64 + // +optional + ThreadCount int32 `json:"threadCount,omitempty"` +} + +var ( + // Available indicates the ingress controller deployment is available. + IngressControllerAvailableConditionType = "Available" + // LoadBalancerManaged indicates the management status of any load balancer + // service associated with an ingress controller. + LoadBalancerManagedIngressConditionType = "LoadBalancerManaged" + // LoadBalancerReady indicates the ready state of any load balancer service + // associated with an ingress controller. + LoadBalancerReadyIngressConditionType = "LoadBalancerReady" + // DNSManaged indicates the management status of any DNS records for the + // ingress controller. + DNSManagedIngressConditionType = "DNSManaged" + // DNSReady indicates the ready state of any DNS records for the ingress + // controller. + DNSReadyIngressConditionType = "DNSReady" +) + +// IngressControllerStatus defines the observed status of the IngressController. +type IngressControllerStatus struct { + // availableReplicas is number of observed available replicas according to the + // ingress controller deployment. + AvailableReplicas int32 `json:"availableReplicas"` + + // selector is a label selector, in string format, for ingress controller pods + // corresponding to the IngressController. The number of matching pods should + // equal the value of availableReplicas. + Selector string `json:"selector"` + + // domain is the actual domain in use. + Domain string `json:"domain"` + + // endpointPublishingStrategy is the actual strategy in use. + EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` + + // conditions is a list of conditions and their status. + // + // Available means the ingress controller deployment is available and + // servicing route and ingress resources (i.e, .status.availableReplicas + // equals .spec.replicas) + // + // There are additional conditions which indicate the status of other + // ingress controller features and capabilities. + // + // * LoadBalancerManaged + // - True if the following conditions are met: + // * The endpoint publishing strategy requires a service load balancer. + // - False if any of those conditions are unsatisfied. + // + // * LoadBalancerReady + // - True if the following conditions are met: + // * A load balancer is managed. + // * The load balancer is ready. + // - False if any of those conditions are unsatisfied. 
+ // + // * DNSManaged + // - True if the following conditions are met: + // * The endpoint publishing strategy and platform support DNS. + // * The ingress controller domain is set. + // * dns.config.openshift.io/cluster configures DNS zones. + // - False if any of those conditions are unsatisfied. + // + // * DNSReady + // - True if the following conditions are met: + // * DNS is managed. + // * DNS records have been successfully created. + // - False if any of those conditions are unsatisfied. + Conditions []OperatorCondition `json:"conditions,omitempty"` + + // tlsProfile is the TLS connection configuration that is in effect. + // +optional + TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"` + + // observedGeneration is the most recent generation observed. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// IngressControllerList contains a list of IngressControllers. +type IngressControllerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IngressController `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go new file mode 100644 index 0000000000..cd657c5542 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPIServer provides information to configure an operator to manage kube-apiserver. +type KubeAPIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes API Server + // +kubebuilder:validation:Required + // +required + Spec KubeAPIServerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes API Server + // +optional + Status KubeAPIServerStatus `json:"status"` +} + +type KubeAPIServerSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type KubeAPIServerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPIServerList is a collection of items +type KubeAPIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeAPIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go new file mode 100644 index 0000000000..c20ae30ccd --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllerManager provides information to configure an operator to manage kube-controller-manager. 
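+//
+// A hypothetical manifest (assuming the cluster-scoped singleton is named
+// "cluster" and that logLevel is exposed via the embedded
+// StaticPodOperatorSpec) may look like:
+// apiVersion: operator.openshift.io/v1
+// kind: "KubeControllerManager"
+// metadata:
+//   name: "cluster"
+// spec:
+//   logLevel: Normal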
+type KubeControllerManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes Controller Manager + // +kubebuilder:validation:Required + // +required + Spec KubeControllerManagerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes Controller Manager + // +optional + Status KubeControllerManagerStatus `json:"status"` +} + +type KubeControllerManagerSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type KubeControllerManagerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllerManagerList is a collection of items +type KubeControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeControllerManager `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go new file mode 100644 index 0000000000..5949ac021a --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go @@ -0,0 +1,40 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. +type KubeStorageVersionMigrator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec KubeStorageVersionMigratorSpec `json:"spec"` + // +optional + Status KubeStorageVersionMigratorStatus `json:"status"` +} + +type KubeStorageVersionMigratorSpec struct { + OperatorSpec `json:",inline"` +} + +type KubeStorageVersionMigratorStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeStorageVersionMigratorList is a collection of items +type KubeStorageVersionMigratorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeStorageVersionMigrator `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go new file mode 100644 index 0000000000..0a19e3ad21 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -0,0 +1,540 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network describes the cluster's desired network configuration. It is +// consumed by the cluster-network-operator. +// +k8s:openapi-gen=true +type Network struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NetworkSpec `json:"spec,omitempty"` + Status NetworkStatus `json:"status,omitempty"` +} + +// NetworkStatus is detailed operator status, which is distilled +// up to the Network clusteroperator object. 
+type NetworkStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkList contains a list of Network configurations
+type NetworkList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Network `json:"items"`
+}
+
+// NetworkSpec is the top-level network configuration object.
+type NetworkSpec struct {
+ OperatorSpec `json:",inline"`
+
+ // clusterNetwork is the IP address pool to use for pod IPs.
+ // Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks.
+ // Others only support one. This is equivalent to the cluster-cidr.
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+ // serviceNetwork is the IP address pool to use for Service IPs
+ // Currently, all existing network providers only support a single value
+ // here, but this is an array to allow for growth.
+ ServiceNetwork []string `json:"serviceNetwork"`
+
+ // defaultNetwork is the "default" network that all pods will receive
+ DefaultNetwork DefaultNetworkDefinition `json:"defaultNetwork"`
+
+ // additionalNetworks is a list of extra networks to make available to pods
+ // when multiple networks are enabled.
+ AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"`
+
+ // disableMultiNetwork specifies whether or not multiple pod network
+ // support should be disabled. If unset, this property defaults to
+ // 'false' and multiple network support is enabled.
+ DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"`
+
+ // useMultiNetworkPolicy enables a controller which allows for
+ // MultiNetworkPolicy objects to be used on additional networks as
+ // created by Multus CNI. MultiNetworkPolicy objects are similar to NetworkPolicy
+ // objects, but NetworkPolicy objects only apply to the primary interface.
+ // With MultiNetworkPolicy, you can control the traffic that a pod can receive
+ // over the secondary interfaces. If unset, this property defaults to 'false'
+ // and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is
+ // 'true' then the value of this field is ignored.
+ UseMultiNetworkPolicy *bool `json:"useMultiNetworkPolicy,omitempty"`
+
+ // deployKubeProxy specifies whether or not a standalone kube-proxy should
+ // be deployed by the operator. Some network providers include kube-proxy
+ // or similar functionality. If unset, the plugin will attempt to select
+ // the correct value, which is false when OpenShift SDN and ovn-kubernetes are
+ // used and true otherwise.
+ // +optional
+ DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"`
+
+ // disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck
+ // CRs from a test pod to every node, apiserver and LB should be disabled.
+ // If unset, this property defaults to 'false' and network diagnostics is enabled.
+ // Setting this to 'true' would reduce the additional load of the pods performing the checks.
+ // +optional
+ // +kubebuilder:default:=false
+ DisableNetworkDiagnostics bool `json:"disableNetworkDiagnostics"`
+
+ // kubeProxyConfig lets us configure desired proxy configuration.
+ // If not specified, sensible defaults will be chosen by OpenShift directly.
+ // Not consumed by all network providers - currently only openshift-sdn.
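+ // An illustrative Network spec fragment (hypothetical values, not part of
+ // this change) showing how the standalone proxy could be tuned:
+ //
+ //   kubeProxyConfig:
+ //     iptablesSyncPeriod: 30s
+ //     bindAddress: 0.0.0.0
+ //     proxyArguments:
+ //       metrics-bind-address:
+ //       - 0.0.0.0:9101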
+ KubeProxyConfig *ProxyConfig `json:"kubeProxyConfig,omitempty"`
+
+ // exportNetworkFlows enables and configures the export of network flow metadata from the pod network
+ // using the NetFlow, SFlow or IPFIX protocols. Currently only supported on the OVN-Kubernetes plugin.
+ // If unset, flows will not be exported to any collector.
+ // +optional
+ ExportNetworkFlows *ExportNetworkFlows `json:"exportNetworkFlows,omitempty"`
+
+ // migration enables and configures the cluster network migration.
+ // Set this to the target network type to allow changing the default network.
+ // If unset, the operation of changing the cluster default network plugin will be rejected.
+ // +optional
+ Migration *NetworkMigration `json:"migration,omitempty"`
+}
+
+// NetworkMigration represents the cluster network configuration.
+type NetworkMigration struct {
+ // networkType is the target type of network migration
+ // The supported values are OpenShiftSDN, OVNKubernetes
+ NetworkType NetworkType `json:"networkType"`
+}
+
+// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size
+// HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If
+// the HostPrefix field is not used by the plugin, it can be left unset.
+// Not all network providers support multiple ClusterNetworks
+type ClusterNetworkEntry struct {
+ CIDR string `json:"cidr"`
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ HostPrefix uint32 `json:"hostPrefix,omitempty"`
+}
+
+// DefaultNetworkDefinition represents a single network plugin's configuration.
+// type must be specified, along with exactly one "Config" that matches the type.
+type DefaultNetworkDefinition struct {
+ // type is the type of network
+ // All NetworkTypes are supported except for NetworkTypeRaw
+ Type NetworkType `json:"type"`
+
+ // openShiftSDNConfig configures the openshift-sdn plugin
+ // +optional
+ OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"`
+
+ // oVNKubernetesConfig configures the ovn-kubernetes plugin. This is currently
+ // not implemented.
+ // +optional
+ OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"`
+
+ // KuryrConfig configures the kuryr plugin
+ // +optional
+ KuryrConfig *KuryrConfig `json:"kuryrConfig,omitempty"`
+}
+
+// SimpleMacvlanConfig contains configurations for the macvlan interface.
+type SimpleMacvlanConfig struct {
+ // master is the host interface to create the macvlan interface from.
+ // If not specified, it will be the default route interface
+ // +optional
+ Master string `json:"master,omitempty"`
+
+ // IPAMConfig configures the IPAM module that will be used for IP Address Management (IPAM).
+ // +optional
+ IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"`
+
+ // mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge
+ // +optional
+ Mode MacvlanMode `json:"mode,omitempty"`
+
+ // mtu is the mtu to use for the macvlan interface. If unset, the host's
+ // kernel will select the value.
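+ // A sketch of a complete simpleMacvlanConfig (hypothetical values)
+ // combining the master, mode and mtu fields documented above:
+ //
+ //   simpleMacvlanConfig:
+ //     master: eth0
+ //     mode: bridge
+ //     mtu: 1450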
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU uint32 `json:"mtu,omitempty"`
+}
+
+// StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses
+type StaticIPAMAddresses struct {
+ // Address is the IP address in CIDR format
+ // +optional
+ Address string `json:"address"`
+ // Gateway is an IP inside the subnet to designate as the gateway
+ // +optional
+ Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes
+type StaticIPAMRoutes struct {
+ // Destination is the IP route destination
+ Destination string `json:"destination"`
+ // Gateway is the route's next-hop IP address
+ // If unset, a default gateway is assumed (as determined by the CNI plugin).
+ // +optional
+ Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMDNS provides DNS related information for static IPAM
+type StaticIPAMDNS struct {
+ // Nameservers points to the DNS servers to use for IP lookup
+ // +optional
+ Nameservers []string `json:"nameservers,omitempty"`
+ // Domain configures the local domain used for short hostname lookups
+ // +optional
+ Domain string `json:"domain,omitempty"`
+ // Search configures priority ordered search domains for short hostname lookups
+ // +optional
+ Search []string `json:"search,omitempty"`
+}
+
+// StaticIPAMConfig contains configurations for static IPAM (IP Address Management)
+type StaticIPAMConfig struct {
+ // Addresses configures IP addresses for the interface
+ // +optional
+ Addresses []StaticIPAMAddresses `json:"addresses,omitempty"`
+ // Routes configures IP routes for the interface
+ // +optional
+ Routes []StaticIPAMRoutes `json:"routes,omitempty"`
+ // DNS configures DNS for the interface
+ // +optional
+ DNS *StaticIPAMDNS `json:"dns,omitempty"`
+}
+
+// IPAMConfig contains configurations for IPAM (IP Address Management)
+type IPAMConfig struct {
+ // Type is the type of IPAM module that will be used for IP Address Management (IPAM).
+ // The supported values are IPAMTypeDHCP, IPAMTypeStatic
+ Type IPAMType `json:"type"`
+
+ // StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic
+ // +optional
+ StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"`
+}
+
+// AdditionalNetworkDefinition configures an extra network that is available but not
+// created by default. Instead, pods must request it by name.
+// type must be specified, along with exactly one "Config" that matches the type.
+type AdditionalNetworkDefinition struct {
+ // type is the type of network
+ // The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan
+ Type NetworkType `json:"type"`
+
+ // name is the name of the network. This will be populated in the resulting CRD.
+ // This must be unique.
+ Name string `json:"name"`
+
+ // namespace is the namespace of the network. This will be populated in the resulting CRD.
+ // If not given, the network will be created in the default namespace.
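+ // An illustrative additionalNetworks entry (hypothetical values) showing
+ // how type, name and namespace combine with a raw CNI config:
+ //
+ //   additionalNetworks:
+ //   - type: Raw
+ //     name: example-net
+ //     namespace: example-ns
+ //     rawCNIConfig: '{"cniVersion": "0.3.1", "type": "bridge"}'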
+ Namespace string `json:"namespace,omitempty"`
+
+ // rawCNIConfig is the raw CNI configuration JSON to create in the
+ // NetworkAttachmentDefinition CRD
+ RawCNIConfig string `json:"rawCNIConfig,omitempty"`
+
+ // SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan
+ // +optional
+ SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"`
+}
+
+// OpenShiftSDNConfig configures the three openshift-sdn plugins
+type OpenShiftSDNConfig struct {
+ // mode is one of "Multitenant", "Subnet", or "NetworkPolicy"
+ Mode SDNMode `json:"mode"`
+
+ // vxlanPort is the port to use for all vxlan packets. The default is 4789.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ VXLANPort *uint32 `json:"vxlanPort,omitempty"`
+
+ // mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset.
+ // This must be 50 bytes smaller than the machine's uplink.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU *uint32 `json:"mtu,omitempty"`
+
+ // useExternalOpenvswitch used to control whether the operator would deploy an OVS
+ // DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always
+ // run as a system service, and this flag is ignored.
+ // DEPRECATED: non-functional as of 4.6
+ // +optional
+ UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"`
+
+ // enableUnidling controls whether or not the service proxy will support idling
+ // and unidling of services. By default, unidling is enabled.
+ EnableUnidling *bool `json:"enableUnidling,omitempty"`
+}
+
+// KuryrConfig configures the Kuryr-Kubernetes SDN
+type KuryrConfig struct {
+ // The port kuryr-daemon will listen on for readiness and liveness requests.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ DaemonProbesPort *uint32 `json:"daemonProbesPort,omitempty"`
+
+ // The port kuryr-controller will listen on for readiness and liveness requests.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ ControllerProbesPort *uint32 `json:"controllerProbesPort,omitempty"`
+
+ // openStackServiceNetwork contains the CIDR of the network from which to allocate IPs for
+ // OpenStack Octavia's Amphora VMs. Please note that with the Amphora driver Octavia uses
+ // two IPs from that network for each loadbalancer - one given by OpenShift and a second
+ // for VRRP connections. As the first one is managed by OpenShift's and the second by Neutron's
+ // IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork`
+ // needs to be at least twice the size of `serviceNetwork`, and the whole `serviceNetwork`
+ // must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then
+ // make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that
+ // are not overlapping with `serviceNetwork`, effectively preventing conflicts. If not set,
+ // cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix
+ // size by 1.
+ // +optional
+ OpenStackServiceNetwork string `json:"openStackServiceNetwork,omitempty"`
+
+ // enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port
+ // pool with a minimum number of ports. Kuryr uses Neutron port pooling to offset the fact
+ // that it takes a significant amount of time to create one. Instead of creating it when
+ // a pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By
+ // default port prepopulation is disabled.
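+ // An illustrative kuryrConfig fragment (hypothetical values) tuning the
+ // port-pool behaviour described here and in the pool* fields below:
+ //
+ //   kuryrConfig:
+ //     enablePortPoolsPrepopulation: true
+ //     poolMinPorts: 5
+ //     poolBatchPorts: 10
+ //     poolMaxPorts: 0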
+ // +optional
+ EnablePortPoolsPrepopulation bool `json:"enablePortPoolsPrepopulation,omitempty"`
+
+ // poolMaxPorts sets a maximum number of free ports that are kept in a port pool.
+ // If the number of ports exceeds this setting, free ports will get deleted. Setting 0
+ // will disable this upper bound, effectively preventing pools from shrinking; this
+ // is the default value. For more information about port pools see the
+ // enablePortPoolsPrepopulation setting.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ PoolMaxPorts uint `json:"poolMaxPorts,omitempty"`
+
+ // poolMinPorts sets a minimum number of free ports that should be kept in a port pool.
+ // If the number of ports is lower than this setting, new ports will get created and
+ // added to the pool. The default is 1. For more information about port pools see the
+ // enablePortPoolsPrepopulation setting.
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ PoolMinPorts uint `json:"poolMinPorts,omitempty"`
+
+ // poolBatchPorts sets a number of ports that should be created in a single batch request
+ // to extend the port pool. The default is 3. For more information about port pools see the
+ // enablePortPoolsPrepopulation setting.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ PoolBatchPorts *uint `json:"poolBatchPorts,omitempty"`
+
+ // mtu is the MTU that Kuryr should use when creating pod networks in Neutron.
+ // The value has to be lower than or equal to the MTU of the nodes' network and Neutron has
+ // to allow creation of tenant networks with such an MTU. If unset, pod networks will be
+ // created with the same MTU as the nodes' network.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU *uint32 `json:"mtu,omitempty"`
+}
+
+// OVNKubernetesConfig contains the configuration parameters for networks
+// using the ovn-kubernetes network project
+type OVNKubernetesConfig struct {
+ // mtu is the MTU to use for the tunnel interface. This must be 100
+ // bytes smaller than the uplink mtu.
+ // Default is 1400
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU *uint32 `json:"mtu,omitempty"`
+ // genevePort is the UDP port to be used by Geneve encapsulation.
+ // Default is 6081
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ GenevePort *uint32 `json:"genevePort,omitempty"`
+ // HybridOverlayConfig configures an additional overlay network for peers that are
+ // not using OVN.
+ // +optional
+ HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"`
+ // ipsecConfig enables and configures IPsec for pods on the pod network within the
+ // cluster.
+ // +optional
+ IPsecConfig *IPsecConfig `json:"ipsecConfig,omitempty"`
+ // policyAuditConfig is the configuration for network policy audit events. If unset,
+ // reported defaults are used.
+ // +optional
+ PolicyAuditConfig *PolicyAuditConfig `json:"policyAuditConfig,omitempty"`
+}
+
+type HybridOverlayConfig struct {
+ // HybridClusterNetwork defines a network space given to nodes on an additional overlay network.
+ HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"`
+ // HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network.
+ // Default is 4789
+ // +optional
+ HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"`
+}
+
+type IPsecConfig struct {
+}
+
+type ExportNetworkFlows struct {
+ // netFlow defines the NetFlow configuration.
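+ // An illustrative exportNetworkFlows stanza (hypothetical collector
+ // address) that would send NetFlow records from OVS:
+ //
+ //   exportNetworkFlows:
+ //     netFlow:
+ //       collectors:
+ //       - 192.0.2.10:2056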
+ // +optional
+ NetFlow *NetFlowConfig `json:"netFlow,omitempty"`
+ // sFlow defines the SFlow configuration.
+ // +optional
+ SFlow *SFlowConfig `json:"sFlow,omitempty"`
+ // ipfix defines IPFIX configuration.
+ // +optional
+ IPFIX *IPFIXConfig `json:"ipfix,omitempty"`
+}
+
+type NetFlowConfig struct {
+ // netFlow defines the NetFlow collectors that will consume the flow data exported from OVS.
+ // It is a list of strings formatted as ip:port with a maximum of ten items
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=10
+ Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type SFlowConfig struct {
+ // sFlowCollectors is a list of strings formatted as ip:port with a maximum of ten items
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=10
+ Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type IPFIXConfig struct {
+ // ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=10
+ Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
+type IPPort string
+
+type PolicyAuditConfig struct {
+ // rateLimit is the approximate maximum number of messages to generate per second, per node. If
+ // unset, the default of 20 msg/sec is used.
+ // +kubebuilder:default=20
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ RateLimit *uint32 `json:"rateLimit,omitempty"`
+
+ // maxFileSize is the max size an ACL audit log file is allowed to reach before rotation occurs.
+ // Units are in MB and the default is 50MB
+ // +kubebuilder:default=50
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ MaxFileSize *uint32 `json:"maxFileSize,omitempty"`
+
+ // destination is the location for policy log messages.
+ // Regardless of this config, persistent logs will always be dumped to the host
+ // at /var/log/ovn/.
+ // Additionally, syslog output may be configured as follows.
+ // Valid values are:
+ // - "libc" -> to use the libc syslog() function of the host node's journald process
+ // - "udp:host:port" -> for sending syslog over UDP
+ // - "unix:file" -> for using the UNIX domain socket directly
+ // - "null" -> to discard all messages logged to syslog
+ // The default is "null"
+ // +kubebuilder:default=null
+ // +kubebuilder:pattern='^libc$|^null$|^udp:(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):([0-9]){0,5}$|^unix:(\/[^\/ ]*)+([^\/\s])$'
+ // +optional
+ Destination string `json:"destination,omitempty"`
+
+ // syslogFacility is the RFC5424 facility for generated messages, e.g. "kern".
Default is "local0" + // +kubebuilder:default=local0 + // +kubebuilder:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;clock;ftp;ntp;audit;alert;clock2;local0;local1;local2;local3;local4;local5;local6;local7 + // +optional + SyslogFacility string `json:"syslogFacility,omitempty"` +} + +// NetworkType describes the network plugin type to configure +type NetworkType string + +// ProxyArgumentList is a list of arguments to pass to the kubeproxy process +type ProxyArgumentList []string + +// ProxyConfig defines the configuration knobs for kubeproxy +// All of these are optional and have sensible defaults +type ProxyConfig struct { + // An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted + // in large clusters for performance reasons, but this is no longer necessary, and there is no reason + // to change this from the default value. + // Default: 30s + IptablesSyncPeriod string `json:"iptablesSyncPeriod,omitempty"` + + // The address to "bind" on + // Defaults to 0.0.0.0 + BindAddress string `json:"bindAddress,omitempty"` + + // Any additional arguments to pass to the kubeproxy process + ProxyArguments map[string]ProxyArgumentList `json:"proxyArguments,omitempty"` +} + +const ( + // NetworkTypeOpenShiftSDN means the openshift-sdn plugin will be configured + NetworkTypeOpenShiftSDN NetworkType = "OpenShiftSDN" + + // NetworkTypeOVNKubernetes means the ovn-kubernetes project will be configured. + // This is currently not implemented. + NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes" + + // NetworkTypeKuryr means the kuryr-kubernetes project will be configured. + NetworkTypeKuryr NetworkType = "Kuryr" + + // NetworkTypeRaw + NetworkTypeRaw NetworkType = "Raw" + + // NetworkTypeSimpleMacvlan + NetworkTypeSimpleMacvlan NetworkType = "SimpleMacvlan" +) + +// SDNMode is the Mode the openshift-sdn plugin is in +type SDNMode string + +const ( + // SDNModeSubnet is a simple mode that offers no isolation between pods + SDNModeSubnet SDNMode = "Subnet" + + // SDNModeMultitenant is a special "multitenant" mode that offers limited + // isolation configuration between namespaces + SDNModeMultitenant SDNMode = "Multitenant" + + // SDNModeNetworkPolicy is a full NetworkPolicy implementation that allows + // for sophisticated network isolation and segmenting. This is the default. + SDNModeNetworkPolicy SDNMode = "NetworkPolicy" +) + +// MacvlanMode is the Mode of macvlan. The value are lowercase to match the CNI plugin +// config values. See "man ip-link" for its detail. +type MacvlanMode string + +const ( + // MacvlanModeBridge is the macvlan with thin bridge function. 
+ MacvlanModeBridge MacvlanMode = "Bridge"
+ // MacvlanModePrivate
+ MacvlanModePrivate MacvlanMode = "Private"
+ // MacvlanModeVEPA is used with Virtual Ethernet Port Aggregator
+ // (802.1qbg) switch
+ MacvlanModeVEPA MacvlanMode = "VEPA"
+ // MacvlanModePassthru
+ MacvlanModePassthru MacvlanMode = "Passthru"
+)
+
+// IPAMType describes the IP address management type to configure
+type IPAMType string
+
+const (
+ // IPAMTypeDHCP uses DHCP for IP management
+ IPAMTypeDHCP IPAMType = "DHCP"
+ // IPAMTypeStatic uses static IP
+ IPAMTypeStatic IPAMType = "Static"
+)
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
new file mode 100644
index 0000000000..8ab50ed321
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
@@ -0,0 +1,50 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.
+type OpenShiftAPIServer struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the OpenShift API Server.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OpenShiftAPIServerSpec `json:"spec"`
+
+ // status defines the observed status of the OpenShift API Server.
+ // +optional
+ Status OpenShiftAPIServerStatus `json:"status"`
+}
+
+type OpenShiftAPIServerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type OpenShiftAPIServerStatus struct {
+ OperatorStatus `json:",inline"`
+
+ // latestAvailableRevision is the latest revision used as suffix of revisioned
+ // secrets like encryption-config. A new revision causes a new deployment of
+ // pods.
+ // +optional
+ // +kubebuilder:validation:Minimum=0
+ LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftAPIServerList is a collection of items
+type OpenShiftAPIServerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []OpenShiftAPIServer `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go
new file mode 100644
index 0000000000..0f23b01be2
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go
@@ -0,0 +1,40 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.
+type OpenShiftControllerManager struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OpenShiftControllerManagerSpec `json:"spec"`
+ // +optional
+ Status OpenShiftControllerManagerStatus `json:"status"`
+}
+
+type OpenShiftControllerManagerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type OpenShiftControllerManagerStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftControllerManagerList is a collection of items
+type OpenShiftControllerManagerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []OpenShiftControllerManager `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
new file mode 100644
index 0000000000..f8a542082c
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
@@ -0,0 +1,43 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeScheduler provides information to configure an operator to manage the scheduler.
+type KubeScheduler struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Kubernetes Scheduler
+ // +kubebuilder:validation:Required
+ // +required
+ Spec KubeSchedulerSpec `json:"spec"`
+
+ // status is the most recently observed status of the Kubernetes Scheduler
+ // +optional
+ Status KubeSchedulerStatus `json:"status"`
+}
+
+type KubeSchedulerSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+}
+
+type KubeSchedulerStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeSchedulerList is a collection of items
+type KubeSchedulerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []KubeScheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
new file mode 100644
index 0000000000..b8d5e2646a
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
@@ -0,0 +1,42 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCA provides information to configure an operator to manage the service cert controllers
+type ServiceCA struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ServiceCASpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional + Status ServiceCAStatus `json:"status"` +} + +type ServiceCASpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCAStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCAList is a collection of items +type ServiceCAList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []ServiceCA `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go new file mode 100644 index 0000000000..4dc98f4a4d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go @@ -0,0 +1,42 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server +// DEPRECATED: will be removed in 4.6 +type ServiceCatalogAPIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec ServiceCatalogAPIServerSpec `json:"spec"` + // +optional + Status ServiceCatalogAPIServerStatus `json:"status"` +} + +type ServiceCatalogAPIServerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogAPIServerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServerList is a collection of items +// DEPRECATED: will be removed in 4.6 +type ServiceCatalogAPIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []ServiceCatalogAPIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go new file mode 100644 index 0000000000..f4cc3f6957 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go @@ -0,0 +1,42 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager +// DEPRECATED: will be removed in 4.6 +type ServiceCatalogControllerManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec ServiceCatalogControllerManagerSpec `json:"spec"` + // +optional + Status ServiceCatalogControllerManagerStatus `json:"status"` +} + +type ServiceCatalogControllerManagerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogControllerManagerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManagerList is a collection of items +// DEPRECATED: will be removed in 4.6 +type ServiceCatalogControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items 
[]ServiceCatalogControllerManager `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go new file mode 100644 index 0000000000..d5d3bd407e --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_storage.go @@ -0,0 +1,44 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name. +type Storage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec StorageSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status StorageStatus `json:"status"` +} + +// StorageSpec is the specification of the desired behavior of the cluster storage operator. +type StorageSpec struct { + OperatorSpec `json:",inline"` +} + +// StorageStatus defines the observed status of the cluster storage operator. +type StorageStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// StorageList contains a list of Storages. +type StorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Storage `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..befbfb16c3 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -0,0 +1,3565 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClassicLoadBalancerParameters) DeepCopyInto(out *AWSClassicLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClassicLoadBalancerParameters. +func (in *AWSClassicLoadBalancerParameters) DeepCopy() *AWSClassicLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSClassicLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSLoadBalancerParameters) DeepCopyInto(out *AWSLoadBalancerParameters) { + *out = *in + if in.ClassicLoadBalancerParameters != nil { + in, out := &in.ClassicLoadBalancerParameters, &out.ClassicLoadBalancerParameters + *out = new(AWSClassicLoadBalancerParameters) + **out = **in + } + if in.NetworkLoadBalancerParameters != nil { + in, out := &in.NetworkLoadBalancerParameters, &out.NetworkLoadBalancerParameters + *out = new(AWSNetworkLoadBalancerParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerParameters. +func (in *AWSLoadBalancerParameters) DeepCopy() *AWSLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSNetworkLoadBalancerParameters) DeepCopyInto(out *AWSNetworkLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNetworkLoadBalancerParameters. +func (in *AWSNetworkLoadBalancerParameters) DeepCopy() *AWSNetworkLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSNetworkLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogging) DeepCopyInto(out *AccessLogging) { + *out = *in + in.Destination.DeepCopyInto(&out.Destination) + in.HTTPCaptureHeaders.DeepCopyInto(&out.HTTPCaptureHeaders) + if in.HTTPCaptureCookies != nil { + in, out := &in.HTTPCaptureCookies, &out.HTTPCaptureCookies + *out = make([]IngressControllerCaptureHTTPCookie, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogging. +func (in *AccessLogging) DeepCopy() *AccessLogging { + if in == nil { + return nil + } + out := new(AccessLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddPage) DeepCopyInto(out *AddPage) { + *out = *in + if in.DisabledActions != nil { + in, out := &in.DisabledActions, &out.DisabledActions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddPage. +func (in *AddPage) DeepCopy() *AddPage { + if in == nil { + return nil + } + out := new(AddPage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalNetworkDefinition) DeepCopyInto(out *AdditionalNetworkDefinition) { + *out = *in + if in.SimpleMacvlanConfig != nil { + in, out := &in.SimpleMacvlanConfig, &out.SimpleMacvlanConfig + *out = new(SimpleMacvlanConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalNetworkDefinition. +func (in *AdditionalNetworkDefinition) DeepCopy() *AdditionalNetworkDefinition { + if in == nil { + return nil + } + out := new(AdditionalNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.OAuthAPIServer = in.OAuthAPIServer + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotController) DeepCopyInto(out *CSISnapshotController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotController. 
+func (in *CSISnapshotController) DeepCopy() *CSISnapshotController { + if in == nil { + return nil + } + out := new(CSISnapshotController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerList) DeepCopyInto(out *CSISnapshotControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSISnapshotController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerList. +func (in *CSISnapshotControllerList) DeepCopy() *CSISnapshotControllerList { + if in == nil { + return nil + } + out := new(CSISnapshotControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerSpec) DeepCopyInto(out *CSISnapshotControllerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerSpec. +func (in *CSISnapshotControllerSpec) DeepCopy() *CSISnapshotControllerSpec { + if in == nil { + return nil + } + out := new(CSISnapshotControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerStatus) DeepCopyInto(out *CSISnapshotControllerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerStatus. +func (in *CSISnapshotControllerStatus) DeepCopy() *CSISnapshotControllerStatus { + if in == nil { + return nil + } + out := new(CSISnapshotControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredential) DeepCopyInto(out *CloudCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredential. +func (in *CloudCredential) DeepCopy() *CloudCredential { + if in == nil { + return nil + } + out := new(CloudCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CloudCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialList) DeepCopyInto(out *CloudCredentialList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudCredential, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialList. +func (in *CloudCredentialList) DeepCopy() *CloudCredentialList { + if in == nil { + return nil + } + out := new(CloudCredentialList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudCredentialList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialSpec) DeepCopyInto(out *CloudCredentialSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialSpec. +func (in *CloudCredentialSpec) DeepCopy() *CloudCredentialSpec { + if in == nil { + return nil + } + out := new(CloudCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialStatus) DeepCopyInto(out *CloudCredentialStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialStatus. +func (in *CloudCredentialStatus) DeepCopy() *CloudCredentialStatus { + if in == nil { + return nil + } + out := new(CloudCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriver) DeepCopyInto(out *ClusterCSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriver. +func (in *ClusterCSIDriver) DeepCopy() *ClusterCSIDriver { + if in == nil { + return nil + } + out := new(ClusterCSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterCSIDriverList) DeepCopyInto(out *ClusterCSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverList. +func (in *ClusterCSIDriverList) DeepCopy() *ClusterCSIDriverList { + if in == nil { + return nil + } + out := new(ClusterCSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverSpec) DeepCopyInto(out *ClusterCSIDriverSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverSpec. +func (in *ClusterCSIDriverSpec) DeepCopy() *ClusterCSIDriverSpec { + if in == nil { + return nil + } + out := new(ClusterCSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverStatus) DeepCopyInto(out *ClusterCSIDriverStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverStatus. +func (in *ClusterCSIDriverStatus) DeepCopy() *ClusterCSIDriverStatus { + if in == nil { + return nil + } + out := new(ClusterCSIDriverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. +func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. +func (in *ConfigSpec) DeepCopy() *ConfigSpec { + if in == nil { + return nil + } + out := new(ConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. +func (in *ConfigStatus) DeepCopy() *ConfigStatus { + if in == nil { + return nil + } + out := new(ConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleConfigRoute) DeepCopyInto(out *ConsoleConfigRoute) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleConfigRoute. +func (in *ConsoleConfigRoute) DeepCopy() *ConsoleConfigRoute { + if in == nil { + return nil + } + out := new(ConsoleConfigRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleCustomization) DeepCopyInto(out *ConsoleCustomization) { + *out = *in + out.CustomLogoFile = in.CustomLogoFile + in.DeveloperCatalog.DeepCopyInto(&out.DeveloperCatalog) + in.ProjectAccess.DeepCopyInto(&out.ProjectAccess) + in.QuickStarts.DeepCopyInto(&out.QuickStarts) + in.AddPage.DeepCopyInto(&out.AddPage) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCustomization. +func (in *ConsoleCustomization) DeepCopy() *ConsoleCustomization { + if in == nil { + return nil + } + out := new(ConsoleCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleProviders) DeepCopyInto(out *ConsoleProviders) { + *out = *in + if in.Statuspage != nil { + in, out := &in.Statuspage, &out.Statuspage + *out = new(StatuspageProvider) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleProviders. +func (in *ConsoleProviders) DeepCopy() *ConsoleProviders { + if in == nil { + return nil + } + out := new(ConsoleProviders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + in.Customization.DeepCopyInto(&out.Customization) + in.Providers.DeepCopyInto(&out.Providers) + out.Route = in.Route + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerLoggingDestinationParameters) DeepCopyInto(out *ContainerLoggingDestinationParameters) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerLoggingDestinationParameters.
+func (in *ContainerLoggingDestinationParameters) DeepCopy() *ContainerLoggingDestinationParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerLoggingDestinationParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNS) DeepCopyInto(out *DNS) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
+func (in *DNS) DeepCopy() *DNS {
+	if in == nil {
+		return nil
+	}
+	out := new(DNS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNS) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSList) DeepCopyInto(out *DNSList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DNS, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList.
+func (in *DNSList) DeepCopy() *DNSList {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSNodePlacement) DeepCopyInto(out *DNSNodePlacement) {
+	*out = *in
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNodePlacement.
+func (in *DNSNodePlacement) DeepCopy() *DNSNodePlacement {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSNodePlacement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
+	*out = *in
+	if in.Servers != nil {
+		in, out := &in.Servers, &out.Servers
+		*out = make([]Server, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.NodePlacement.DeepCopyInto(&out.NodePlacement)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
+func (in *DNSSpec) DeepCopy() *DNSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSStatus) DeepCopyInto(out *DNSStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]OperatorCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus.
+func (in *DNSStatus) DeepCopy() *DNSStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) {
+	*out = *in
+	if in.OpenShiftSDNConfig != nil {
+		in, out := &in.OpenShiftSDNConfig, &out.OpenShiftSDNConfig
+		*out = new(OpenShiftSDNConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OVNKubernetesConfig != nil {
+		in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig
+		*out = new(OVNKubernetesConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.KuryrConfig != nil {
+		in, out := &in.KuryrConfig, &out.KuryrConfig
+		*out = new(KuryrConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNetworkDefinition.
+func (in *DefaultNetworkDefinition) DeepCopy() *DefaultNetworkDefinition {
+	if in == nil {
+		return nil
+	}
+	out := new(DefaultNetworkDefinition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeveloperConsoleCatalogCategory) DeepCopyInto(out *DeveloperConsoleCatalogCategory) {
+	*out = *in
+	in.DeveloperConsoleCatalogCategoryMeta.DeepCopyInto(&out.DeveloperConsoleCatalogCategoryMeta)
+	if in.Subcategories != nil {
+		in, out := &in.Subcategories, &out.Subcategories
+		*out = make([]DeveloperConsoleCatalogCategoryMeta, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategory.
+func (in *DeveloperConsoleCatalogCategory) DeepCopy() *DeveloperConsoleCatalogCategory {
+	if in == nil {
+		return nil
+	}
+	out := new(DeveloperConsoleCatalogCategory)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopyInto(out *DeveloperConsoleCatalogCategoryMeta) {
+	*out = *in
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategoryMeta.
+func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopy() *DeveloperConsoleCatalogCategoryMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(DeveloperConsoleCatalogCategoryMeta)
+	in.DeepCopyInto(out)
+	return out
+}
+
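The map and slice loops above (DNSNodePlacement, the Tags copy, the Conditions loop) exist because the leading *out = *in is a shallow copy: maps and slices are copied as headers that still point at the caller's backing storage. A self-contained sketch of the aliasing bug the generated loops prevent (the placement type here is a stand-in, not from this package):

package main

import "fmt"

type placement struct {
	NodeSelector map[string]string
}

func main() {
	a := placement{NodeSelector: map[string]string{"kubernetes.io/os": "linux"}}

	b := a // shallow: b.NodeSelector shares a's backing map
	b.NodeSelector["region"] = "east"
	fmt.Println(a.NodeSelector["region"]) // "east": a was mutated through b

	c := placement{NodeSelector: make(map[string]string, len(a.NodeSelector))}
	for k, v := range a.NodeSelector { // the same idiom the generated DeepCopyInto uses
		c.NodeSelector[k] = v
	}
	c.NodeSelector["zone"] = "z1"
	fmt.Println(a.NodeSelector["zone"]) // "": c is independent of a
}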
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeveloperConsoleCatalogCustomization) DeepCopyInto(out *DeveloperConsoleCatalogCustomization) {
+	*out = *in
+	if in.Categories != nil {
+		in, out := &in.Categories, &out.Categories
+		*out = make([]DeveloperConsoleCatalogCategory, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCustomization.
+func (in *DeveloperConsoleCatalogCustomization) DeepCopy() *DeveloperConsoleCatalogCustomization {
+	if in == nil {
+		return nil
+	}
+	out := new(DeveloperConsoleCatalogCustomization)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPublishingStrategy) DeepCopyInto(out *EndpointPublishingStrategy) {
+	*out = *in
+	if in.LoadBalancer != nil {
+		in, out := &in.LoadBalancer, &out.LoadBalancer
+		*out = new(LoadBalancerStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HostNetwork != nil {
+		in, out := &in.HostNetwork, &out.HostNetwork
+		*out = new(HostNetworkStrategy)
+		**out = **in
+	}
+	if in.Private != nil {
+		in, out := &in.Private, &out.Private
+		*out = new(PrivateStrategy)
+		**out = **in
+	}
+	if in.NodePort != nil {
+		in, out := &in.NodePort, &out.NodePort
+		*out = new(NodePortStrategy)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPublishingStrategy.
+func (in *EndpointPublishingStrategy) DeepCopy() *EndpointPublishingStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointPublishingStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Etcd) DeepCopyInto(out *Etcd) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd.
+func (in *Etcd) DeepCopy() *Etcd {
+	if in == nil {
+		return nil
+	}
+	out := new(Etcd)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Etcd) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdList) DeepCopyInto(out *EtcdList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Etcd, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList.
+func (in *EtcdList) DeepCopy() *EtcdList {
+	if in == nil {
+		return nil
+	}
+	out := new(EtcdList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EtcdList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) {
+	*out = *in
+	in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec.
+func (in *EtcdSpec) DeepCopy() *EtcdSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(EtcdSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) {
+	*out = *in
+	in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus.
+func (in *EtcdStatus) DeepCopy() *EtcdStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(EtcdStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExportNetworkFlows) DeepCopyInto(out *ExportNetworkFlows) {
+	*out = *in
+	if in.NetFlow != nil {
+		in, out := &in.NetFlow, &out.NetFlow
+		*out = new(NetFlowConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SFlow != nil {
+		in, out := &in.SFlow, &out.SFlow
+		*out = new(SFlowConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.IPFIX != nil {
+		in, out := &in.IPFIX, &out.IPFIX
+		*out = new(IPFIXConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportNetworkFlows.
+func (in *ExportNetworkFlows) DeepCopy() *ExportNetworkFlows {
+	if in == nil {
+		return nil
+	}
+	out := new(ExportNetworkFlows)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) {
+	*out = *in
+	if in.Upstreams != nil {
+		in, out := &in.Upstreams, &out.Upstreams
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardPlugin.
+func (in *ForwardPlugin) DeepCopy() *ForwardPlugin {
+	if in == nil {
+		return nil
+	}
+	out := new(ForwardPlugin)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPLoadBalancerParameters) DeepCopyInto(out *GCPLoadBalancerParameters) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPLoadBalancerParameters.
+func (in *GCPLoadBalancerParameters) DeepCopy() *GCPLoadBalancerParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(GCPLoadBalancerParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenerationStatus) DeepCopyInto(out *GenerationStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationStatus.
+func (in *GenerationStatus) DeepCopy() *GenerationStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(GenerationStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostNetworkStrategy) DeepCopyInto(out *HostNetworkStrategy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNetworkStrategy.
+func (in *HostNetworkStrategy) DeepCopy() *HostNetworkStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(HostNetworkStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HybridOverlayConfig) DeepCopyInto(out *HybridOverlayConfig) {
+	*out = *in
+	if in.HybridClusterNetwork != nil {
+		in, out := &in.HybridClusterNetwork, &out.HybridClusterNetwork
+		*out = make([]ClusterNetworkEntry, len(*in))
+		copy(*out, *in)
+	}
+	if in.HybridOverlayVXLANPort != nil {
+		in, out := &in.HybridOverlayVXLANPort, &out.HybridOverlayVXLANPort
+		*out = new(uint32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridOverlayConfig.
+func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(HybridOverlayConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) {
+	*out = *in
+	if in.StaticIPAMConfig != nil {
+		in, out := &in.StaticIPAMConfig, &out.StaticIPAMConfig
+		*out = new(StaticIPAMConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig.
+func (in *IPAMConfig) DeepCopy() *IPAMConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(IPAMConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPFIXConfig) DeepCopyInto(out *IPFIXConfig) {
+	*out = *in
+	if in.Collectors != nil {
+		in, out := &in.Collectors, &out.Collectors
+		*out = make([]IPPort, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFIXConfig.
+func (in *IPFIXConfig) DeepCopy() *IPFIXConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(IPFIXConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPsecConfig) DeepCopyInto(out *IPsecConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecConfig.
+func (in *IPsecConfig) DeepCopy() *IPsecConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(IPsecConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressController) DeepCopyInto(out *IngressController) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController.
+func (in *IngressController) DeepCopy() *IngressController {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressController)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressController) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerCaptureHTTPCookie) DeepCopyInto(out *IngressControllerCaptureHTTPCookie) {
+	*out = *in
+	out.IngressControllerCaptureHTTPCookieUnion = in.IngressControllerCaptureHTTPCookieUnion
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookie.
+func (in *IngressControllerCaptureHTTPCookie) DeepCopy() *IngressControllerCaptureHTTPCookie {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerCaptureHTTPCookie)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopyInto(out *IngressControllerCaptureHTTPCookieUnion) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookieUnion.
+func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopy() *IngressControllerCaptureHTTPCookieUnion {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerCaptureHTTPCookieUnion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerCaptureHTTPHeader) DeepCopyInto(out *IngressControllerCaptureHTTPHeader) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeader.
+func (in *IngressControllerCaptureHTTPHeader) DeepCopy() *IngressControllerCaptureHTTPHeader {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerCaptureHTTPHeader)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerCaptureHTTPHeaders) DeepCopyInto(out *IngressControllerCaptureHTTPHeaders) {
+	*out = *in
+	if in.Request != nil {
+		in, out := &in.Request, &out.Request
+		*out = make([]IngressControllerCaptureHTTPHeader, len(*in))
+		copy(*out, *in)
+	}
+	if in.Response != nil {
+		in, out := &in.Response, &out.Response
+		*out = make([]IngressControllerCaptureHTTPHeader, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeaders.
+func (in *IngressControllerCaptureHTTPHeaders) DeepCopy() *IngressControllerCaptureHTTPHeaders {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerCaptureHTTPHeaders)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerHTTPHeaders) DeepCopyInto(out *IngressControllerHTTPHeaders) {
+	*out = *in
+	out.UniqueId = in.UniqueId
+	if in.HeaderNameCaseAdjustments != nil {
+		in, out := &in.HeaderNameCaseAdjustments, &out.HeaderNameCaseAdjustments
+		*out = make([]IngressControllerHTTPHeaderNameCaseAdjustment, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaders.
+func (in *IngressControllerHTTPHeaders) DeepCopy() *IngressControllerHTTPHeaders {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerHTTPHeaders)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopyInto(out *IngressControllerHTTPUniqueIdHeaderPolicy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPUniqueIdHeaderPolicy.
+func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopy() *IngressControllerHTTPUniqueIdHeaderPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerHTTPUniqueIdHeaderPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerList) DeepCopyInto(out *IngressControllerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]IngressController, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerList.
+func (in *IngressControllerList) DeepCopy() *IngressControllerList {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressControllerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerLogging) DeepCopyInto(out *IngressControllerLogging) {
+	*out = *in
+	if in.Access != nil {
+		in, out := &in.Access, &out.Access
+		*out = new(AccessLogging)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerLogging.
+func (in *IngressControllerLogging) DeepCopy() *IngressControllerLogging {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerLogging)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) {
+	*out = *in
+	out.HttpErrorCodePages = in.HttpErrorCodePages
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.EndpointPublishingStrategy != nil {
+		in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy
+		*out = new(EndpointPublishingStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DefaultCertificate != nil {
+		in, out := &in.DefaultCertificate, &out.DefaultCertificate
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.NamespaceSelector != nil {
+		in, out := &in.NamespaceSelector, &out.NamespaceSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RouteSelector != nil {
+		in, out := &in.RouteSelector, &out.RouteSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NodePlacement != nil {
+		in, out := &in.NodePlacement, &out.NodePlacement
+		*out = new(NodePlacement)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSSecurityProfile != nil {
+		in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile
+		*out = new(configv1.TLSSecurityProfile)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RouteAdmission != nil {
+		in, out := &in.RouteAdmission, &out.RouteAdmission
+		*out = new(RouteAdmissionPolicy)
+		**out = **in
+	}
+	if in.Logging != nil {
+		in, out := &in.Logging, &out.Logging
+		*out = new(IngressControllerLogging)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HTTPHeaders != nil {
+		in, out := &in.HTTPHeaders, &out.HTTPHeaders
+		*out = new(IngressControllerHTTPHeaders)
+		(*in).DeepCopyInto(*out)
+	}
+	out.TuningOptions = in.TuningOptions
+	in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSpec.
+func (in *IngressControllerSpec) DeepCopy() *IngressControllerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) {
+	*out = *in
+	if in.EndpointPublishingStrategy != nil {
+		in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy
+		*out = new(EndpointPublishingStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]OperatorCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TLSProfile != nil {
+		in, out := &in.TLSProfile, &out.TLSProfile
+		*out = new(configv1.TLSProfileSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerStatus.
+func (in *IngressControllerStatus) DeepCopy() *IngressControllerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerTuningOptions) DeepCopyInto(out *IngressControllerTuningOptions) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerTuningOptions.
+func (in *IngressControllerTuningOptions) DeepCopy() *IngressControllerTuningOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressControllerTuningOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServer.
+func (in *KubeAPIServer) DeepCopy() *KubeAPIServer {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeAPIServer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeAPIServer) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerList) DeepCopyInto(out *KubeAPIServerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KubeAPIServer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerList.
+func (in *KubeAPIServerList) DeepCopy() *KubeAPIServerList {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeAPIServerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeAPIServerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerSpec) DeepCopyInto(out *KubeAPIServerSpec) {
+	*out = *in
+	in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerSpec.
+func (in *KubeAPIServerSpec) DeepCopy() *KubeAPIServerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeAPIServerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerStatus) DeepCopyInto(out *KubeAPIServerStatus) {
+	*out = *in
+	in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerStatus.
+func (in *KubeAPIServerStatus) DeepCopy() *KubeAPIServerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeAPIServerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManager) DeepCopyInto(out *KubeControllerManager) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManager.
+func (in *KubeControllerManager) DeepCopy() *KubeControllerManager {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeControllerManager)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeControllerManager) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerList) DeepCopyInto(out *KubeControllerManagerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KubeControllerManager, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerList.
+func (in *KubeControllerManagerList) DeepCopy() *KubeControllerManagerList {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeControllerManagerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeControllerManagerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerSpec) DeepCopyInto(out *KubeControllerManagerSpec) {
+	*out = *in
+	in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerSpec.
+func (in *KubeControllerManagerSpec) DeepCopy() *KubeControllerManagerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeControllerManagerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerStatus) DeepCopyInto(out *KubeControllerManagerStatus) {
+	*out = *in
+	in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerStatus.
+func (in *KubeControllerManagerStatus) DeepCopy() *KubeControllerManagerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeControllerManagerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeScheduler) DeepCopyInto(out *KubeScheduler) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeScheduler.
+func (in *KubeScheduler) DeepCopy() *KubeScheduler {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeScheduler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeScheduler) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerList) DeepCopyInto(out *KubeSchedulerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KubeScheduler, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerList.
+func (in *KubeSchedulerList) DeepCopy() *KubeSchedulerList {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeSchedulerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeSchedulerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerSpec) DeepCopyInto(out *KubeSchedulerSpec) {
+	*out = *in
+	in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerSpec.
+func (in *KubeSchedulerSpec) DeepCopy() *KubeSchedulerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeSchedulerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerStatus) DeepCopyInto(out *KubeSchedulerStatus) {
+	*out = *in
+	in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerStatus.
+func (in *KubeSchedulerStatus) DeepCopy() *KubeSchedulerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeSchedulerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeStorageVersionMigrator) DeepCopyInto(out *KubeStorageVersionMigrator) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigrator.
+func (in *KubeStorageVersionMigrator) DeepCopy() *KubeStorageVersionMigrator {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeStorageVersionMigrator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeStorageVersionMigrator) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeStorageVersionMigratorList) DeepCopyInto(out *KubeStorageVersionMigratorList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KubeStorageVersionMigrator, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorList.
+func (in *KubeStorageVersionMigratorList) DeepCopy() *KubeStorageVersionMigratorList {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeStorageVersionMigratorList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeStorageVersionMigratorList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeStorageVersionMigratorSpec) DeepCopyInto(out *KubeStorageVersionMigratorSpec) {
+	*out = *in
+	in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorSpec.
+func (in *KubeStorageVersionMigratorSpec) DeepCopy() *KubeStorageVersionMigratorSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeStorageVersionMigratorSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeStorageVersionMigratorStatus) DeepCopyInto(out *KubeStorageVersionMigratorStatus) {
+	*out = *in
+	in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorStatus.
+func (in *KubeStorageVersionMigratorStatus) DeepCopy() *KubeStorageVersionMigratorStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KubeStorageVersionMigratorStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KuryrConfig) DeepCopyInto(out *KuryrConfig) {
+	*out = *in
+	if in.DaemonProbesPort != nil {
+		in, out := &in.DaemonProbesPort, &out.DaemonProbesPort
+		*out = new(uint32)
+		**out = **in
+	}
+	if in.ControllerProbesPort != nil {
+		in, out := &in.ControllerProbesPort, &out.ControllerProbesPort
+		*out = new(uint32)
+		**out = **in
+	}
+	if in.PoolBatchPorts != nil {
+		in, out := &in.PoolBatchPorts, &out.PoolBatchPorts
+		*out = new(uint)
+		**out = **in
+	}
+	if in.MTU != nil {
+		in, out := &in.MTU, &out.MTU
+		*out = new(uint32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KuryrConfig.
+func (in *KuryrConfig) DeepCopy() *KuryrConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(KuryrConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
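KuryrConfig above shows the generator's two pointer idioms side by side: a pointer to a scalar (*uint32, *uint) is re-allocated and copied with **out = **in, while a pointer to a struct that itself holds reference types is recursed into via (*in).DeepCopyInto(*out). A self-contained sketch of why the cheap idiom is only safe in the scalar case (the types here are stand-ins, not from this package):

package main

import "fmt"

type inner struct{ Ports []int }

type outer struct {
	MTU    *uint32 // pointer to a scalar: a value copy is a full copy
	Nested *inner  // pointer to a struct with a slice: needs its own deep copy
}

func (in *outer) deepCopyInto(out *outer) {
	*out = *in
	if in.MTU != nil {
		out.MTU = new(uint32)
		*out.MTU = *in.MTU // the **out = **in idiom: a fresh uint32
	}
	if in.Nested != nil {
		out.Nested = &inner{Ports: make([]int, len(in.Nested.Ports))}
		copy(out.Nested.Ports, in.Nested.Ports) // recursing, like (*in).DeepCopyInto(*out)
	}
}

func main() {
	mtu := uint32(1400)
	a := outer{MTU: &mtu, Nested: &inner{Ports: []int{9000}}}
	var b outer
	a.deepCopyInto(&b)
	*b.MTU = 8900
	b.Nested.Ports[0] = 1
	fmt.Println(*a.MTU, a.Nested.Ports[0]) // 1400 9000: a is unaffected by edits to b
}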
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerStrategy) DeepCopyInto(out *LoadBalancerStrategy) {
+	*out = *in
+	if in.ProviderParameters != nil {
+		in, out := &in.ProviderParameters, &out.ProviderParameters
+		*out = new(ProviderLoadBalancerParameters)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStrategy.
+func (in *LoadBalancerStrategy) DeepCopy() *LoadBalancerStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoggingDestination) DeepCopyInto(out *LoggingDestination) {
+	*out = *in
+	if in.Syslog != nil {
+		in, out := &in.Syslog, &out.Syslog
+		*out = new(SyslogLoggingDestinationParameters)
+		**out = **in
+	}
+	if in.Container != nil {
+		in, out := &in.Container, &out.Container
+		*out = new(ContainerLoggingDestinationParameters)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestination.
+func (in *LoggingDestination) DeepCopy() *LoggingDestination {
+	if in == nil {
+		return nil
+	}
+	out := new(LoggingDestination)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MyOperatorResource) DeepCopyInto(out *MyOperatorResource) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResource.
+func (in *MyOperatorResource) DeepCopy() *MyOperatorResource {
+	if in == nil {
+		return nil
+	}
+	out := new(MyOperatorResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MyOperatorResourceSpec) DeepCopyInto(out *MyOperatorResourceSpec) {
+	*out = *in
+	in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceSpec.
+func (in *MyOperatorResourceSpec) DeepCopy() *MyOperatorResourceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MyOperatorResourceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MyOperatorResourceStatus) DeepCopyInto(out *MyOperatorResourceStatus) {
+	*out = *in
+	in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceStatus.
+func (in *MyOperatorResourceStatus) DeepCopy() *MyOperatorResourceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MyOperatorResourceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetFlowConfig) DeepCopyInto(out *NetFlowConfig) {
+	*out = *in
+	if in.Collectors != nil {
+		in, out := &in.Collectors, &out.Collectors
+		*out = make([]IPPort, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetFlowConfig.
+func (in *NetFlowConfig) DeepCopy() *NetFlowConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(NetFlowConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Network) DeepCopyInto(out *Network) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
+func (in *Network) DeepCopy() *Network {
+	if in == nil {
+		return nil
+	}
+	out := new(Network)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Network) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkList) DeepCopyInto(out *NetworkList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Network, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList.
+func (in *NetworkList) DeepCopy() *NetworkList {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration.
+func (in *NetworkMigration) DeepCopy() *NetworkMigration {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkMigration)
+	in.DeepCopyInto(out)
+	return out
+}
+
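None of these functions are written by hand; files like this are conventionally emitted by deepcopy-gen from k8s.io/code-generator, driven by markers in the API source. A sketch of the markers that would plausibly produce the Network methods above (the struct body and tags are abbreviated and illustrative, not copied from this diff):

// doc.go: package-wide marker asking deepcopy-gen to generate for every type.
// +k8s:deepcopy-gen=package
package v1

// The interfaces marker adds DeepCopyObject, which is why Network and
// NetworkList carry three methods while leaf structs carry only two.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Network struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   NetworkSpec   `json:"spec,omitempty"`
	Status NetworkStatus `json:"status,omitempty"`
}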
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
+	*out = *in
+	in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+	if in.ClusterNetwork != nil {
+		in, out := &in.ClusterNetwork, &out.ClusterNetwork
+		*out = make([]ClusterNetworkEntry, len(*in))
+		copy(*out, *in)
+	}
+	if in.ServiceNetwork != nil {
+		in, out := &in.ServiceNetwork, &out.ServiceNetwork
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	in.DefaultNetwork.DeepCopyInto(&out.DefaultNetwork)
+	if in.AdditionalNetworks != nil {
+		in, out := &in.AdditionalNetworks, &out.AdditionalNetworks
+		*out = make([]AdditionalNetworkDefinition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DisableMultiNetwork != nil {
+		in, out := &in.DisableMultiNetwork, &out.DisableMultiNetwork
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UseMultiNetworkPolicy != nil {
+		in, out := &in.UseMultiNetworkPolicy, &out.UseMultiNetworkPolicy
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DeployKubeProxy != nil {
+		in, out := &in.DeployKubeProxy, &out.DeployKubeProxy
+		*out = new(bool)
+		**out = **in
+	}
+	if in.KubeProxyConfig != nil {
+		in, out := &in.KubeProxyConfig, &out.KubeProxyConfig
+		*out = new(ProxyConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExportNetworkFlows != nil {
+		in, out := &in.ExportNetworkFlows, &out.ExportNetworkFlows
+		*out = new(ExportNetworkFlows)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Migration != nil {
+		in, out := &in.Migration, &out.Migration
+		*out = new(NetworkMigration)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
+func (in *NetworkSpec) DeepCopy() *NetworkSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
+	*out = *in
+	in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
+func (in *NetworkStatus) DeepCopy() *NetworkStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePlacement) DeepCopyInto(out *NodePlacement) {
+	*out = *in
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement.
+func (in *NodePlacement) DeepCopy() *NodePlacement {
+	if in == nil {
+		return nil
+	}
+	out := new(NodePlacement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePortStrategy) DeepCopyInto(out *NodePortStrategy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortStrategy.
+func (in *NodePortStrategy) DeepCopy() *NodePortStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(NodePortStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+	*out = *in
+	if in.LastFailedTime != nil {
+		in, out := &in.LastFailedTime, &out.LastFailedTime
+		*out = (*in).DeepCopy()
+	}
+	if in.LastFailedRevisionErrors != nil {
+		in, out := &in.LastFailedRevisionErrors, &out.LastFailedRevisionErrors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthAPIServerStatus) DeepCopyInto(out *OAuthAPIServerStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAPIServerStatus.
+func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(OAuthAPIServerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) {
+	*out = *in
+	if in.MTU != nil {
+		in, out := &in.MTU, &out.MTU
+		*out = new(uint32)
+		**out = **in
+	}
+	if in.GenevePort != nil {
+		in, out := &in.GenevePort, &out.GenevePort
+		*out = new(uint32)
+		**out = **in
+	}
+	if in.HybridOverlayConfig != nil {
+		in, out := &in.HybridOverlayConfig, &out.HybridOverlayConfig
+		*out = new(HybridOverlayConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.IPsecConfig != nil {
+		in, out := &in.IPsecConfig, &out.IPsecConfig
+		*out = new(IPsecConfig)
+		**out = **in
+	}
+	if in.PolicyAuditConfig != nil {
+		in, out := &in.PolicyAuditConfig, &out.PolicyAuditConfig
+		*out = new(PolicyAuditConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig.
+func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(OVNKubernetesConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServer) DeepCopyInto(out *OpenShiftAPIServer) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServer.
+func (in *OpenShiftAPIServer) DeepCopy() *OpenShiftAPIServer {
+	if in == nil {
+		return nil
+	}
+	out := new(OpenShiftAPIServer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftAPIServer) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServerList) DeepCopyInto(out *OpenShiftAPIServerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]OpenShiftAPIServer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerList.
+func (in *OpenShiftAPIServerList) DeepCopy() *OpenShiftAPIServerList {
+	if in == nil {
+		return nil
+	}
+	out := new(OpenShiftAPIServerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftAPIServerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServerSpec) DeepCopyInto(out *OpenShiftAPIServerSpec) {
+	*out = *in
+	in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerSpec.
+func (in *OpenShiftAPIServerSpec) DeepCopy() *OpenShiftAPIServerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(OpenShiftAPIServerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServerStatus) DeepCopyInto(out *OpenShiftAPIServerStatus) {
+	*out = *in
+	in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerStatus.
+func (in *OpenShiftAPIServerStatus) DeepCopy() *OpenShiftAPIServerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(OpenShiftAPIServerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftControllerManager) DeepCopyInto(out *OpenShiftControllerManager) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManager.
+func (in *OpenShiftControllerManager) DeepCopy() *OpenShiftControllerManager {
+	if in == nil {
+		return nil
+	}
+	out := new(OpenShiftControllerManager)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftControllerManager) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftControllerManagerList) DeepCopyInto(out *OpenShiftControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerList. +func (in *OpenShiftControllerManagerList) DeepCopy() *OpenShiftControllerManagerList { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerSpec) DeepCopyInto(out *OpenShiftControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerSpec. +func (in *OpenShiftControllerManagerSpec) DeepCopy() *OpenShiftControllerManagerSpec { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerStatus) DeepCopyInto(out *OpenShiftControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerStatus. +func (in *OpenShiftControllerManagerStatus) DeepCopy() *OpenShiftControllerManagerStatus { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftSDNConfig) DeepCopyInto(out *OpenShiftSDNConfig) { + *out = *in + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(uint32) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.UseExternalOpenvswitch != nil { + in, out := &in.UseExternalOpenvswitch, &out.UseExternalOpenvswitch + *out = new(bool) + **out = **in + } + if in.EnableUnidling != nil { + in, out := &in.EnableUnidling, &out.EnableUnidling + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftSDNConfig. +func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig { + if in == nil { + return nil + } + out := new(OpenShiftSDNConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition. +func (in *OperatorCondition) DeepCopy() *OperatorCondition { + if in == nil { + return nil + } + out := new(OperatorCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in + in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + in.ObservedConfig.DeepCopyInto(&out.ObservedConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. +func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Generations != nil { + in, out := &in.Generations, &out.Generations + *out = make([]GenerationStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. +func (in *OperatorStatus) DeepCopy() *OperatorStatus { + if in == nil { + return nil + } + out := new(OperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyAuditConfig) DeepCopyInto(out *PolicyAuditConfig) { + *out = *in + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(uint32) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAuditConfig. +func (in *PolicyAuditConfig) DeepCopy() *PolicyAuditConfig { + if in == nil { + return nil + } + out := new(PolicyAuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateStrategy) DeepCopyInto(out *PrivateStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateStrategy. +func (in *PrivateStrategy) DeepCopy() *PrivateStrategy { + if in == nil { + return nil + } + out := new(PrivateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectAccess) DeepCopyInto(out *ProjectAccess) { + *out = *in + if in.AvailableClusterRoles != nil { + in, out := &in.AvailableClusterRoles, &out.AvailableClusterRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectAccess. 
+func (in *ProjectAccess) DeepCopy() *ProjectAccess { + if in == nil { + return nil + } + out := new(ProjectAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderLoadBalancerParameters) DeepCopyInto(out *ProviderLoadBalancerParameters) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPLoadBalancerParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderLoadBalancerParameters. +func (in *ProviderLoadBalancerParameters) DeepCopy() *ProviderLoadBalancerParameters { + if in == nil { + return nil + } + out := new(ProviderLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ProxyArgumentList) DeepCopyInto(out *ProxyArgumentList) { + { + in := &in + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyArgumentList. +func (in ProxyArgumentList) DeepCopy() ProxyArgumentList { + if in == nil { + return nil + } + out := new(ProxyArgumentList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyArguments != nil { + in, out := &in.ProxyArguments, &out.ProxyArguments + *out = make(map[string]ProxyArgumentList, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuickStarts) DeepCopyInto(out *QuickStarts) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickStarts. +func (in *QuickStarts) DeepCopy() *QuickStarts { + if in == nil { + return nil + } + out := new(QuickStarts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteAdmissionPolicy) DeepCopyInto(out *RouteAdmissionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdmissionPolicy. +func (in *RouteAdmissionPolicy) DeepCopy() *RouteAdmissionPolicy { + if in == nil { + return nil + } + out := new(RouteAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SFlowConfig) DeepCopyInto(out *SFlowConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFlowConfig. +func (in *SFlowConfig) DeepCopy() *SFlowConfig { + if in == nil { + return nil + } + out := new(SFlowConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Server) DeepCopyInto(out *Server) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.ForwardPlugin.DeepCopyInto(&out.ForwardPlugin) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCA) DeepCopyInto(out *ServiceCA) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCA. +func (in *ServiceCA) DeepCopy() *ServiceCA { + if in == nil { + return nil + } + out := new(ServiceCA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCA) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCAList) DeepCopyInto(out *ServiceCAList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCA, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAList. +func (in *ServiceCAList) DeepCopy() *ServiceCAList { + if in == nil { + return nil + } + out := new(ServiceCAList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCAList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCASpec) DeepCopyInto(out *ServiceCASpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCASpec. +func (in *ServiceCASpec) DeepCopy() *ServiceCASpec { + if in == nil { + return nil + } + out := new(ServiceCASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceCAStatus) DeepCopyInto(out *ServiceCAStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAStatus. +func (in *ServiceCAStatus) DeepCopy() *ServiceCAStatus { + if in == nil { + return nil + } + out := new(ServiceCAStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServer) DeepCopyInto(out *ServiceCatalogAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServer. +func (in *ServiceCatalogAPIServer) DeepCopy() *ServiceCatalogAPIServer { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerList) DeepCopyInto(out *ServiceCatalogAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerList. +func (in *ServiceCatalogAPIServerList) DeepCopy() *ServiceCatalogAPIServerList { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerSpec) DeepCopyInto(out *ServiceCatalogAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerSpec. +func (in *ServiceCatalogAPIServerSpec) DeepCopy() *ServiceCatalogAPIServerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerStatus) DeepCopyInto(out *ServiceCatalogAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerStatus. 
+func (in *ServiceCatalogAPIServerStatus) DeepCopy() *ServiceCatalogAPIServerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManager) DeepCopyInto(out *ServiceCatalogControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManager. +func (in *ServiceCatalogControllerManager) DeepCopy() *ServiceCatalogControllerManager { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerList) DeepCopyInto(out *ServiceCatalogControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerList. +func (in *ServiceCatalogControllerManagerList) DeepCopy() *ServiceCatalogControllerManagerList { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerSpec) DeepCopyInto(out *ServiceCatalogControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerSpec. +func (in *ServiceCatalogControllerManagerSpec) DeepCopy() *ServiceCatalogControllerManagerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerStatus) DeepCopyInto(out *ServiceCatalogControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerStatus. 
+func (in *ServiceCatalogControllerManagerStatus) DeepCopy() *ServiceCatalogControllerManagerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleMacvlanConfig) DeepCopyInto(out *SimpleMacvlanConfig) { + *out = *in + if in.IPAMConfig != nil { + in, out := &in.IPAMConfig, &out.IPAMConfig + *out = new(IPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleMacvlanConfig. +func (in *SimpleMacvlanConfig) DeepCopy() *SimpleMacvlanConfig { + if in == nil { + return nil + } + out := new(SimpleMacvlanConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMAddresses) DeepCopyInto(out *StaticIPAMAddresses) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMAddresses. +func (in *StaticIPAMAddresses) DeepCopy() *StaticIPAMAddresses { + if in == nil { + return nil + } + out := new(StaticIPAMAddresses) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMConfig) DeepCopyInto(out *StaticIPAMConfig) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]StaticIPAMAddresses, len(*in)) + copy(*out, *in) + } + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]StaticIPAMRoutes, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(StaticIPAMDNS) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMConfig. +func (in *StaticIPAMConfig) DeepCopy() *StaticIPAMConfig { + if in == nil { + return nil + } + out := new(StaticIPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMDNS) DeepCopyInto(out *StaticIPAMDNS) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMDNS. +func (in *StaticIPAMDNS) DeepCopy() *StaticIPAMDNS { + if in == nil { + return nil + } + out := new(StaticIPAMDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMRoutes) DeepCopyInto(out *StaticIPAMRoutes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMRoutes. +func (in *StaticIPAMRoutes) DeepCopy() *StaticIPAMRoutes { + if in == nil { + return nil + } + out := new(StaticIPAMRoutes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticPodOperatorSpec) DeepCopyInto(out *StaticPodOperatorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorSpec. +func (in *StaticPodOperatorSpec) DeepCopy() *StaticPodOperatorSpec { + if in == nil { + return nil + } + out := new(StaticPodOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + if in.NodeStatuses != nil { + in, out := &in.NodeStatuses, &out.NodeStatuses + *out = make([]NodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus. +func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus { + if in == nil { + return nil + } + out := new(StaticPodOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatuspageProvider) DeepCopyInto(out *StatuspageProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatuspageProvider. +func (in *StatuspageProvider) DeepCopy() *StatuspageProvider { + if in == nil { + return nil + } + out := new(StatuspageProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. +func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyslogLoggingDestinationParameters) DeepCopyInto(out *SyslogLoggingDestinationParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogLoggingDestinationParameters. +func (in *SyslogLoggingDestinationParameters) DeepCopy() *SyslogLoggingDestinationParameters { + if in == nil { + return nil + } + out := new(SyslogLoggingDestinationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..c1109833c1 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,1219 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_GenerationStatus = map[string]string{ + "": "GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.", + "group": "group is the group of the thing you're tracking", + "resource": "resource is the resource type of the thing you're tracking", + "namespace": "namespace is where the thing you're tracking is", + "name": "name is the name of the thing you're tracking", + "lastGeneration": "lastGeneration is the last generation of the workload controller involved", + "hash": "hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps", +} + +func (GenerationStatus) SwaggerDoc() map[string]string { + return map_GenerationStatus +} + +var map_MyOperatorResource = map[string]string{ + "": "MyOperatorResource is an example operator configuration type", +} + +func (MyOperatorResource) SwaggerDoc() map[string]string { + return map_MyOperatorResource +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus provides information about the current state of a particular node managed by this operator.", + "nodeName": "nodeName is the name of the node", + "currentRevision": "currentRevision is the generation of the most recently successful deployment", + "targetRevision": "targetRevision is the generation of the deployment we're trying to apply", + "lastFailedRevision": "lastFailedRevision is the generation of the deployment we tried and failed to deploy.", + "lastFailedTime": "lastFailedTime is the time the last failed revision failed the last time.", + "lastFailedCount": "lastFailedCount is how often the last failed revision failed.", + "lastFailedRevisionErrors": "lastFailedRevisionErrors is a list of the errors during the failed deployment referenced in lastFailedRevision", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_OperatorCondition = map[string]string{ + "": "OperatorCondition is just the standard condition fields.", +} + +func (OperatorCondition) SwaggerDoc() map[string]string { + return map_OperatorCondition +} + +var map_OperatorSpec = map[string]string{ + "": "OperatorSpec contains common fields operators need. It is intended to be anonymous included inside of the Spec struct for your particular operator.", + "managementState": "managementState indicates whether and how the operator should manage the component", + "logLevel": "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", + "operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", + "unsupportedConfigOverrides": "unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. 
unsupportedConfigOverrides", + "observedConfig": "observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator", +} + +func (OperatorSpec) SwaggerDoc() map[string]string { + return map_OperatorSpec +} + +var map_OperatorStatus = map[string]string{ + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", + "version": "version is the level this availability applies to", + "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state", + "generations": "generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.", +} + +func (OperatorStatus) SwaggerDoc() map[string]string { + return map_OperatorStatus +} + +var map_StaticPodOperatorSpec = map[string]string{ + "": "StaticPodOperatorSpec is spec for controllers that manage static pods.", + "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + "failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", + "succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", +} + +func (StaticPodOperatorSpec) SwaggerDoc() map[string]string { + return map_StaticPodOperatorSpec +} + +var map_StaticPodOperatorStatus = map[string]string{ + "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.", + "latestAvailableRevision": "latestAvailableRevision is the deploymentID of the most recent deployment", + "latestAvailableRevisionReason": "latestAvailableRevisionReason describe the detailed reason for the most recent deployment", + "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", +} + +func (StaticPodOperatorStatus) SwaggerDoc() map[string]string { + return map_StaticPodOperatorStatus +} + +var map_Authentication = map[string]string{ + "": "Authentication provides information to configure an operator to manage authentication.", +} + +func (Authentication) SwaggerDoc() map[string]string { + return map_Authentication +} + +var map_AuthenticationList = map[string]string{ + "": "AuthenticationList is a collection of items", +} + +func (AuthenticationList) SwaggerDoc() map[string]string { + return map_AuthenticationList +} + +var map_AuthenticationStatus = map[string]string{ + "oauthAPIServer": "OAuthAPIServer holds status specific only to oauth-apiserver", +} + +func (AuthenticationStatus) SwaggerDoc() map[string]string { + return map_AuthenticationStatus +} + +var map_OAuthAPIServerStatus = map[string]string{ + "latestAvailableRevision": "LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. 
A new revision causes a new deployment of pods.", +} + +func (OAuthAPIServerStatus) SwaggerDoc() map[string]string { + return map_OAuthAPIServerStatus +} + +var map_CloudCredential = map[string]string{ + "": "CloudCredential provides a means to configure an operator to manage CredentialsRequests.", +} + +func (CloudCredential) SwaggerDoc() map[string]string { + return map_CloudCredential +} + +var map_CloudCredentialSpec = map[string]string{ + "": "CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.", + "credentialsMode": "CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", +} + +func (CloudCredentialSpec) SwaggerDoc() map[string]string { + return map_CloudCredentialSpec +} + +var map_CloudCredentialStatus = map[string]string{ + "": "CloudCredentialStatus defines the observed status of the cloud-credential-operator.", +} + +func (CloudCredentialStatus) SwaggerDoc() map[string]string { + return map_CloudCredentialStatus +} + +var map_Config = map[string]string{ + "": "Config provides information to configure the config operator. It handles installation, migration or synchronization of cloud based cluster configurations like AWS or Azure.", + "spec": "spec is the specification of the desired behavior of the Config Operator.", + "status": "status defines the observed status of the Config Operator.", +} + +func (Config) SwaggerDoc() map[string]string { + return map_Config +} + +var map_ConfigList = map[string]string{ + "": "ConfigList is a collection of items", + "items": "Items contains the items", +} + +func (ConfigList) SwaggerDoc() map[string]string { + return map_ConfigList +} + +var map_AddPage = map[string]string{ + "": "AddPage allows customizing actions on the Add page in developer perspective.", + "disabledActions": "disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID.", +} + +func (AddPage) SwaggerDoc() map[string]string { + return map_AddPage +} + +var map_Console = map[string]string{ + "": "Console provides a means to configure an operator to manage the console.", +} + +func (Console) SwaggerDoc() map[string]string { + return map_Console +} + +var map_ConsoleConfigRoute = map[string]string{ + "": "ConsoleConfigRoute holds information on external route access to console. DEPRECATED", + "hostname": "hostname is the desired custom domain under which console will be available.", + "secret": "secret points to secret in the openshift-config namespace that contains custom certificate and key and needs to be created manually by the cluster admin. 
Referenced Secret is required to contain following key value pairs: - \"tls.crt\" - to specifies custom certificate - \"tls.key\" - to specifies private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", +} + +func (ConsoleConfigRoute) SwaggerDoc() map[string]string { + return map_ConsoleConfigRoute +} + +var map_ConsoleCustomization = map[string]string{ + "": "ConsoleCustomization defines a list of optional configuration for the console UI.", + "brand": "brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout.", + "documentationBaseURL": "documentationBaseURL links to external documentation are shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout.", + "customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.", + "customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred", + "developerCatalog": "developerCatalog allows to configure the shown developer catalog categories.", + "projectAccess": "projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options.", + "quickStarts": "quickStarts allows customization of available ConsoleQuickStart resources in console.", + "addPage": "addPage allows customizing actions on the Add page in developer perspective.", +} + +func (ConsoleCustomization) SwaggerDoc() map[string]string { + return map_ConsoleCustomization +} + +var map_ConsoleProviders = map[string]string{ + "": "ConsoleProviders defines a list of optional additional providers of functionality to the console.", + "statuspage": "statuspage contains ID for statuspage.io page that provides status info about.", +} + +func (ConsoleProviders) SwaggerDoc() map[string]string { + return map_ConsoleProviders +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", + "customization": "customization is used to optionally provide a small set of customization options to the web console.", + "providers": "providers contains configuration for using specific service providers.", + "route": "route contains hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. 
In case of custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, default route will be used. DEPRECATED", + "plugins": "plugins defines a list of enabled console plugin names.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_DeveloperConsoleCatalogCategory = map[string]string{ + "": "DeveloperConsoleCatalogCategory for the developer console catalog.", + "subcategories": "subcategories defines a list of child categories.", +} + +func (DeveloperConsoleCatalogCategory) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCategory +} + +var map_DeveloperConsoleCatalogCategoryMeta = map[string]string{ + "": "DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.", + "id": "ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", + "label": "label defines a category display label. It is required and must have 1-64 characters.", + "tags": "tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item.", +} + +func (DeveloperConsoleCatalogCategoryMeta) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCategoryMeta +} + +var map_DeveloperConsoleCatalogCustomization = map[string]string{ + "": "DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog.", + "categories": "categories which are shown in the developer catalog.", +} + +func (DeveloperConsoleCatalogCustomization) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCustomization +} + +var map_ProjectAccess = map[string]string{ + "": "ProjectAccess contains options for project access roles", + "availableClusterRoles": "availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab.", +} + +func (ProjectAccess) SwaggerDoc() map[string]string { + return map_ProjectAccess +} + +var map_QuickStarts = map[string]string{ + "": "QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.", + "disabled": "disabled is a list of ConsoleQuickStart resource names that are not shown to users.", +} + +func (QuickStarts) SwaggerDoc() map[string]string { + return map_QuickStarts +} + +var map_StatuspageProvider = map[string]string{ + "": "StatuspageProvider provides identity for statuspage account.", + "pageID": "pageID is the unique ID assigned by Statuspage for your page. This must be a public page.", +} + +func (StatuspageProvider) SwaggerDoc() map[string]string { + return map_StatuspageProvider +} + +var map_ClusterCSIDriver = map[string]string{ + "": "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. Name of the object must be name of the CSI driver it operates. 
See CSIDriverName type for list of allowed values.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ClusterCSIDriver) SwaggerDoc() map[string]string { + return map_ClusterCSIDriver +} + +var map_ClusterCSIDriverList = map[string]string{ + "": "ClusterCSIDriverList contains a list of ClusterCSIDriver", +} + +func (ClusterCSIDriverList) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverList +} + +var map_ClusterCSIDriverSpec = map[string]string{ + "": "ClusterCSIDriverSpec is the desired behavior of CSI driver operator", +} + +func (ClusterCSIDriverSpec) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverSpec +} + +var map_ClusterCSIDriverStatus = map[string]string{ + "": "ClusterCSIDriverStatus is the observed status of CSI driver operator", +} + +func (ClusterCSIDriverStatus) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverStatus +} + +var map_CSISnapshotController = map[string]string{ + "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (CSISnapshotController) SwaggerDoc() map[string]string { + return map_CSISnapshotController +} + +var map_CSISnapshotControllerList = map[string]string{ + "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.", +} + +func (CSISnapshotControllerList) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerList +} + +var map_CSISnapshotControllerSpec = map[string]string{ + "": "CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerSpec) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerSpec +} + +var map_CSISnapshotControllerStatus = map[string]string{ + "": "CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerStatus) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerStatus +} + +var map_DNS = map[string]string{ + "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns", + "spec": "spec is the specification of the desired behavior of the DNS.", + "status": "status is the most recently observed status of the DNS.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSList = map[string]string{ + "": "DNSList contains a list of DNS", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSNodePlacement = map[string]string{ + "": "DNSNodePlacement describes the node scheduling configuration for DNS pods.", + "nodeSelector": "nodeSelector is the node selector applied to DNS pods.\n\nIf empty, the default is used, which is currently the following:\n\n kubernetes.io/os: linux\n\nThis default is subject to change.\n\nIf set, the specified selector is used and replaces the default.", + "tolerations": "tolerations is a list of tolerations applied to DNS pods.\n\nIf empty, the DNS operator sets a toleration for the 
\"node-role.kubernetes.io/master\" taint. This default is subject to change. Specifying tolerations without including a toleration for the \"node-role.kubernetes.io/master\" taint may be risky as it could lead to an outage if all worker nodes become unavailable.\n\nNote that the daemon controller adds some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/", +} + +func (DNSNodePlacement) SwaggerDoc() map[string]string { + return map_DNSNodePlacement +} + +var map_DNSSpec = map[string]string{ + "": "DNSSpec is the specification of the desired behavior of the DNS.", + "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", + "nodePlacement": "nodePlacement provides explicit control over the scheduling of DNS pods.\n\nGenerally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint.\n\nIf unset, defaults are used. See nodePlacement for more details.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSStatus = map[string]string{ + "": "DNSStatus defines the observed status of the DNS.", + "clusterIP": "clusterIP is the service IP through which this DNS is made available.\n\nIn the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy.\n\nIn general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. Example: dig foo.com @\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterDomain": "clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\"\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service", + "conditions": "conditions provide information about the state of the DNS on the cluster.\n\nThese are the supported DNS conditions:\n\n * Available\n - True if the following conditions are met:\n * DNS controller daemonset is available.\n - False if any of those conditions are unsatisfied.", +} + +func (DNSStatus) SwaggerDoc() map[string]string { + return map_DNSStatus +} + +var map_ForwardPlugin = map[string]string{ + "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", + "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. 
Upstreams are randomized when more than 1 upstream is specified. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", +} + +func (ForwardPlugin) SwaggerDoc() map[string]string { + return map_ForwardPlugin +} + +var map_Server = map[string]string{ + "": "Server defines the schema for a server that runs per instance of CoreDNS.", + "name": "name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335.", + "zones": "zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., \"cluster.local\") is invalid.", + "forwardPlugin": "forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers.", +} + +func (Server) SwaggerDoc() map[string]string { + return map_Server +} + +var map_Etcd = map[string]string{ + "": "Etcd provides information to configure an operator to manage etcd.", +} + +func (Etcd) SwaggerDoc() map[string]string { + return map_Etcd +} + +var map_EtcdList = map[string]string{ + "": "KubeAPISOperatorConfigList is a collection of items", + "items": "Items contains the items", +} + +func (EtcdList) SwaggerDoc() map[string]string { + return map_EtcdList +} + +var map_AWSClassicLoadBalancerParameters = map[string]string{ + "": "AWSClassicLoadBalancerParameters holds configuration parameters for an AWS Classic load balancer.", +} + +func (AWSClassicLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSClassicLoadBalancerParameters +} + +var map_AWSLoadBalancerParameters = map[string]string{ + "": "AWSLoadBalancerParameters provides configuration settings that are specific to AWS load balancers.", + "type": "type is the type of AWS load balancer to instantiate for an ingresscontroller.\n\nValid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb", + "classicLoadBalancer": "classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic.", + "networkLoadBalancer": "networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. 
 + +var map_AWSNetworkLoadBalancerParameters = map[string]string{ + "": "AWSNetworkLoadBalancerParameters holds configuration parameters for an AWS Network load balancer.", +} + +func (AWSNetworkLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSNetworkLoadBalancerParameters +} + +var map_AccessLogging = map[string]string{ + "": "AccessLogging describes how client requests should be logged.", + "destination": "destination is where access logs go.", + "httpLogFormat": "httpLogFormat specifies the format of the log message for an HTTP request.\n\nIf this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3\n\nNote that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections.", + "httpCaptureHeaders": "httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured.\n\nNote that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections.", + "httpCaptureCookies": "httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured.", +} + +func (AccessLogging) SwaggerDoc() map[string]string { + return map_AccessLogging +} + +var map_ContainerLoggingDestinationParameters = map[string]string{ + "": "ContainerLoggingDestinationParameters describes parameters for the Container logging destination type.", +} + +func (ContainerLoggingDestinationParameters) SwaggerDoc() map[string]string { + return map_ContainerLoggingDestinationParameters +} + +var map_EndpointPublishingStrategy = map[string]string{ + "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", + "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. 
The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.\n\n* NodePortService\n\nPublishes the ingress controller using a Kubernetes NodePort Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will be preserved.", + "loadBalancer": "loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService.", + "hostNetwork": "hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork.", + "private": "private holds parameters for the Private endpoint publishing strategy. Present only if type is Private.", + "nodePort": "nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService.", +} + +func (EndpointPublishingStrategy) SwaggerDoc() map[string]string { + return map_EndpointPublishingStrategy +} + +var map_GCPLoadBalancerParameters = map[string]string{ + "": "GCPLoadBalancerParameters provides configuration settings that are specific to GCP load balancers.", + "clientAccess": "clientAccess describes how client access is restricted for internal load balancers.\n\nValid values are: * \"Global\": Specifying an internal load balancer with Global client access\n allows clients from any region within the VPC to communicate with the load\n balancer.\n\n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access\n\n* \"Local\": Specifying an internal load balancer with Local client access\n means only clients within the same region (and VPC) as the GCP load balancer\n can communicate with the load balancer. Note that this is the default behavior.\n\n https://cloud.google.com/load-balancing/docs/internal#client_access", +} + +func (GCPLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_GCPLoadBalancerParameters +} + +var map_HostNetworkStrategy = map[string]string{ + "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", +} + +func (HostNetworkStrategy) SwaggerDoc() map[string]string { + return map_HostNetworkStrategy +}
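
A minimal sketch of the HostNetwork strategy with PROXY protocol, per the HostNetworkStrategy docs above; as those docs warn, this is only useful when the external load balancer in front of the node ports actually speaks PROXY protocol (the controller name here is a placeholder):

    apiVersion: operator.openshift.io/v1
    kind: IngressController
    metadata:
      name: internal               # illustrative name
      namespace: openshift-ingress-operator
    spec:
      endpointPublishingStrategy:
        type: HostNetwork
        hostNetwork:
          protocol: PROXY          # expect PROXY protocol from the fronting LB
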
 + +var map_IngressController = map[string]string{ + "": "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. See each field for more details.", + "spec": "spec is the specification of the desired behavior of the IngressController.", + "status": "status is the most recently observed status of the IngressController.", +} + +func (IngressController) SwaggerDoc() map[string]string { + return map_IngressController +} + +var map_IngressControllerCaptureHTTPCookie = map[string]string{ + "": "IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured.", + "maxLength": "maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", +} + +func (IngressControllerCaptureHTTPCookie) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPCookie +} + +var map_IngressControllerCaptureHTTPCookieUnion = map[string]string{ + "": "IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured.", + "matchType": "matchType specifies the type of match to be performed on the cookie name. Allowed values are \"Exact\" for an exact string match and \"Prefix\" for a string prefix match. If \"Exact\" is specified, a name must be specified in the name field. If \"Prefix\" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType \"Prefix\" and namePrefix \"foo\" will capture a cookie named \"foo\" or \"foobar\" but not one named \"bar\". The first matching cookie is captured.", + "name": "name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", + "namePrefix": "namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", +} + +func (IngressControllerCaptureHTTPCookieUnion) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPCookieUnion +} + +var map_IngressControllerCaptureHTTPHeader = map[string]string{ + "": "IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured.", + "name": "name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2.", + "maxLength": "maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. 
Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", +} + +func (IngressControllerCaptureHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPHeader +} + +var map_IngressControllerCaptureHTTPHeaders = map[string]string{ + "": "IngressControllerCaptureHTTPHeaders specifies which HTTP headers the IngressController captures.", + "request": "request specifies which HTTP request headers to capture.\n\nIf this field is empty, no request headers are captured.", + "response": "response specifies which HTTP response headers to capture.\n\nIf this field is empty, no response headers are captured.", +} + +func (IngressControllerCaptureHTTPHeaders) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPHeaders +} + +var map_IngressControllerHTTPHeaders = map[string]string{ + "": "IngressControllerHTTPHeaders specifies how the IngressController handles certain HTTP headers.", + "forwardedHeaderPolicy": "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. The value may be one of the following:\n\n* \"Append\", which specifies that the IngressController appends the\n headers, preserving existing headers.\n\n* \"Replace\", which specifies that the IngressController sets the\n headers, replacing any existing Forwarded or X-Forwarded-* headers.\n\n* \"IfNone\", which specifies that the IngressController sets the\n headers if they are not already set.\n\n* \"Never\", which specifies that the IngressController never sets the\n headers, preserving any existing headers.\n\nBy default, the policy is \"Append\".", + "uniqueId": "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests.\n\nIf this field is empty, no such header is injected into requests.", + "headerNameCaseAdjustments": "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization.\n\nThese adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1.\n\nFor request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses.\n\nIf this field is empty, no request headers are adjusted.", +} + +func (IngressControllerHTTPHeaders) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaders +} + +var map_IngressControllerHTTPUniqueIdHeaderPolicy = map[string]string{ + "": "IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a unique id header.", + "name": "name specifies the name of the HTTP header (for example, \"unique-id\") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. 
If the field is empty, no header is injected.", + "format": "format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is \"%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid\"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3", +} + +func (IngressControllerHTTPUniqueIdHeaderPolicy) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPUniqueIdHeaderPolicy +} + +var map_IngressControllerList = map[string]string{ + "": "IngressControllerList contains a list of IngressControllers.", +} + +func (IngressControllerList) SwaggerDoc() map[string]string { + return map_IngressControllerList +} + +var map_IngressControllerLogging = map[string]string{ + "": "IngressControllerLogging describes what should be logged where.", + "access": "access describes how the client requests should be logged.\n\nIf this field is empty, access logging is disabled.", +} + +func (IngressControllerLogging) SwaggerDoc() map[string]string { + return map_IngressControllerLogging +} + +var map_IngressControllerSpec = map[string]string{ + "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-<error code>.http\", where <error code> is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. E.g., https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", + "replicas": "replicas is the desired number of ingress controller replicas. 
If unset, defaults to 2.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", + "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.\n\nNote that the minimum TLS version for ingress controllers is 1.1, and the maximum TLS version is 1.2. An implication of this restriction is that the Modern TLS profile type cannot be used because it requires TLS 1.3.", + "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", + "logging": "logging defines parameters for what should be logged where. 
If this field is empty, operational logs are enabled but access logs are disabled.", + "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", + "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", + "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", +} + +func (IngressControllerSpec) SwaggerDoc() map[string]string { + return map_IngressControllerSpec +} + +var map_IngressControllerStatus = map[string]string{ + "": "IngressControllerStatus defines the observed status of the IngressController.", + "availableReplicas": "availableReplicas is number of observed available replicas according to the ingress controller deployment.", + "selector": "selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas.", + "domain": "domain is the actual domain in use.", + "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.", + "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", + "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.", + "observedGeneration": "observedGeneration is the most recent generation observed.", +} + +func (IngressControllerStatus) SwaggerDoc() map[string]string { + return map_IngressControllerStatus +} + +var map_IngressControllerTuningOptions = map[string]string{ + "": "IngressControllerTuningOptions specifies options for tuning the performance of ingress controller pods", + "headerBufferBytes": "headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). 
If this field is empty, the IngressController will use a default value of 32768 bytes.\n\nSetting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary.", + "headerBufferMaxRewriteBytes": "headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes.\n\nSetting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary.", + "threadCount": "threadCount defines the number of threads created per HAProxy process. Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases.\n\nSetting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly.", +} + +func (IngressControllerTuningOptions) SwaggerDoc() map[string]string { + return map_IngressControllerTuningOptions +} + +var map_LoadBalancerStrategy = map[string]string{ + "": "LoadBalancerStrategy holds parameters for a load balancer.", + "scope": "scope indicates the scope at which the load balancer is exposed. Possible values are \"External\" and \"Internal\".", + "providerParameters": "providerParameters holds desired load balancer information specific to the underlying infrastructure provider.\n\nIf empty, defaults will be applied. See specific providerParameters fields for details about their defaults.", +} + +func (LoadBalancerStrategy) SwaggerDoc() map[string]string { + return map_LoadBalancerStrategy +} + +var map_LoggingDestination = map[string]string{ + "": "LoggingDestination describes a destination for log messages.", + "type": "type is the type of destination for logs. It must be one of the following:\n\n* Container\n\nThe ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity.\n\n* Syslog\n\nLogs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance.", + "syslog": "syslog holds parameters for a syslog endpoint. Present only if type is Syslog.", + "container": "container holds parameters for the Container logging destination. Present only if type is Container.", +} + +func (LoggingDestination) SwaggerDoc() map[string]string { + return map_LoggingDestination +}
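
As a sketch, the AccessLogging and LoggingDestination types above combine into syslog-based access logging like so; the address, port, and facility are placeholders for whatever syslog endpoint an administrator actually runs:

    apiVersion: operator.openshift.io/v1
    kind: IngressController
    metadata:
      name: default
      namespace: openshift-ingress-operator
    spec:
      logging:
        access:
          destination:
            type: Syslog
            syslog:
              address: 192.0.2.10    # illustrative syslog endpoint
              port: 10514
              facility: local1       # optional; local1 is the documented default
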
 + +var map_NodePlacement = map[string]string{ + "": "NodePlacement describes node scheduling configuration for an ingress controller.", + "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf unset, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nIf set, the specified selector is used and replaces the default.", + "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", +} + +func (NodePlacement) SwaggerDoc() map[string]string { + return map_NodePlacement +} + +var map_NodePortStrategy = map[string]string{ + "": "NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", +} + +func (NodePortStrategy) SwaggerDoc() map[string]string { + return map_NodePortStrategy +} + +var map_PrivateStrategy = map[string]string{ + "": "PrivateStrategy holds parameters for the Private endpoint publishing strategy.", +} + +func (PrivateStrategy) SwaggerDoc() map[string]string { + return map_PrivateStrategy +} + +var map_ProviderLoadBalancerParameters = map[string]string{ + "": "ProviderLoadBalancerParameters holds desired load balancer information specific to the underlying infrastructure provider.", + "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"OpenStack\", and \"VSphere\".", + "aws": "aws provides configuration settings that are specific to AWS load balancers.\n\nIf empty, defaults will be applied. See specific aws fields for details about their defaults.", + "gcp": "gcp provides configuration settings that are specific to GCP load balancers.\n\nIf empty, defaults will be applied. 
See specific gcp fields for details about their defaults.", +} + +func (ProviderLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_ProviderLoadBalancerParameters +} + +var map_RouteAdmissionPolicy = map[string]string{ + "": "RouteAdmissionPolicy is an admission policy for allowing new route claims.", + "namespaceOwnership": "namespaceOwnership describes how host name claims across namespaces should be handled.\n\nValue must be one of:\n\n- Strict: Do not allow routes in different namespaces to claim the same host.\n\n- InterNamespaceAllowed: Allow routes to claim different paths of the same\n host name across namespaces.\n\nIf empty, the default is Strict.", + "wildcardPolicy": "wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy.\n\n[1] https://github.com/openshift/api/blob/master/route/v1/types.go\n\nNote: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller.\n\nWildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values.\n\nIf empty, defaults to \"WildcardsDisallowed\".", +} + +func (RouteAdmissionPolicy) SwaggerDoc() map[string]string { + return map_RouteAdmissionPolicy +} + +var map_SyslogLoggingDestinationParameters = map[string]string{ + "": "SyslogLoggingDestinationParameters describes parameters for the Syslog logging destination type.", + "address": "address is the IP address of the syslog endpoint that receives log messages.", + "port": "port is the UDP port number of the syslog endpoint that receives log messages.", + "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".", +} + +func (SyslogLoggingDestinationParameters) SwaggerDoc() map[string]string { + return map_SyslogLoggingDestinationParameters +} + +var map_KubeAPIServer = map[string]string{ + "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.", + "spec": "spec is the specification of the desired behavior of the Kubernetes API Server", + "status": "status is the most recently observed status of the Kubernetes API Server", +} + +func (KubeAPIServer) SwaggerDoc() map[string]string { + return map_KubeAPIServer +} + +var map_KubeAPIServerList = map[string]string{ + "": "KubeAPIServerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeAPIServerList) SwaggerDoc() map[string]string { + return map_KubeAPIServerList +} + +var map_KubeControllerManager = map[string]string{ + "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.", + "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager", + "status": "status is the most recently observed status of the Kubernetes Controller Manager", +} + +func (KubeControllerManager) SwaggerDoc() map[string]string { + return map_KubeControllerManager +} + +var map_KubeControllerManagerList = map[string]string{ + "": "KubeControllerManagerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeControllerManagerList) SwaggerDoc() map[string]string { + return map_KubeControllerManagerList +} + +var map_KubeStorageVersionMigrator = map[string]string{ 
+ "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.", +} + +func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigrator +} + +var map_KubeStorageVersionMigratorList = map[string]string{ + "": "KubeStorageVersionMigratorList is a collection of items", + "items": "Items contains the items", +} + +func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigratorList +} + +var map_AdditionalNetworkDefinition = map[string]string{ + "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan", + "name": "name is the name of the network. This will be populated in the resulting CRD This must be unique.", + "namespace": "namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace.", + "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", + "simpleMacvlanConfig": "SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", +} + +func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string { + return map_AdditionalNetworkDefinition +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_DefaultNetworkDefinition = map[string]string{ + "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network All NetworkTypes are supported except for NetworkTypeRaw", + "openshiftSDNConfig": "openShiftSDNConfig configures the openshift-sdn plugin", + "ovnKubernetesConfig": "oVNKubernetesConfig configures the ovn-kubernetes plugin. This is currently not implemented.", + "kuryrConfig": "KuryrConfig configures the kuryr plugin", +} + +func (DefaultNetworkDefinition) SwaggerDoc() map[string]string { + return map_DefaultNetworkDefinition +} + +var map_ExportNetworkFlows = map[string]string{ + "netFlow": "netFlow defines the NetFlow configuration.", + "sFlow": "sFlow defines the SFlow configuration.", + "ipfix": "ipfix defines IPFIX configuration.", +} + +func (ExportNetworkFlows) SwaggerDoc() map[string]string { + return map_ExportNetworkFlows +} + +var map_HybridOverlayConfig = map[string]string{ + "hybridClusterNetwork": "HybridClusterNetwork defines a network space given to nodes on an additional overlay network.", + "hybridOverlayVXLANPort": "HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. 
Default is 4789", +} + +func (HybridOverlayConfig) SwaggerDoc() map[string]string { + return map_HybridOverlayConfig +} + +var map_IPAMConfig = map[string]string{ + "": "IPAMConfig contains configurations for IPAM (IP Address Management)", + "type": "Type is the type of IPAM module that will be used for IP Address Management (IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", + "staticIPAMConfig": "StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", +} + +func (IPAMConfig) SwaggerDoc() map[string]string { + return map_IPAMConfig +} + +var map_IPFIXConfig = map[string]string{ + "collectors": "ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items", +} + +func (IPFIXConfig) SwaggerDoc() map[string]string { + return map_IPFIXConfig +} + +var map_KuryrConfig = map[string]string{ + "": "KuryrConfig configures the Kuryr-Kubernetes SDN", + "daemonProbesPort": "The port kuryr-daemon will listen on for readiness and liveness requests.", + "controllerProbesPort": "The port kuryr-controller will listen on for readiness and liveness requests.", + "openStackServiceNetwork": "openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectively preventing conflicts. If not set, cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1.", + "enablePortPoolsPrepopulation": "enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. Instead of creating it when pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By default port prepopulation is disabled.", + "poolMaxPorts": "poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting.", + "poolMinPorts": "poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting.", + "poolBatchPorts": "poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting.", + "mtu": "mtu is the MTU that Kuryr should use when creating pod networks in Neutron. 
The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset, Pod networks will be created with the same MTU as the nodes network has.", +} + +func (KuryrConfig) SwaggerDoc() map[string]string { + return map_KuryrConfig +} + +var map_NetFlowConfig = map[string]string{ + "collectors": "netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items", +} + +func (NetFlowConfig) SwaggerDoc() map[string]string { + return map_NetFlowConfig +} + +var map_Network = map[string]string{ + "": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.", +} + +func (Network) SwaggerDoc() map[string]string { + return map_Network +} + +var map_NetworkList = map[string]string{ + "": "NetworkList contains a list of Network configurations", +} + +func (NetworkList) SwaggerDoc() map[string]string { + return map_NetworkList +} + +var map_NetworkMigration = map[string]string{ + "": "NetworkMigration represents the cluster network configuration.", + "networkType": "networkType is the target type of network migration. The supported values are OpenShiftSDN, OVNKubernetes", +} + +func (NetworkMigration) SwaggerDoc() map[string]string { + return map_NetworkMigration +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec is the top-level network configuration object.", + "clusterNetwork": "clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr.", + "serviceNetwork": "serviceNetwork is the IP address pool to use for Service IPs. Currently, all existing network providers only support a single value here, but this is an array to allow for growth.", + "defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive", + "additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.", + "disableMultiNetwork": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.", + "useMultiNetworkPolicy": "useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy objects are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored.", + "deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise.", + "disableNetworkDiagnostics": "disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks.", + "kubeProxyConfig": "kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn.", + "exportNetworkFlows": "exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector.", + "migration": "migration enables and configures the cluster network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +}
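
For reference, a NetworkSpec exercising the fields documented above might look like the following; the CIDRs shown are the common installer defaults, used here only as placeholders:

    apiVersion: operator.openshift.io/v1
    kind: Network
    metadata:
      name: cluster
    spec:
      clusterNetwork:                # pod IP pool (cluster-cidr equivalent)
      - cidr: 10.128.0.0/14
        hostPrefix: 23
      serviceNetwork:                # Service IP pool; single value today
      - 172.30.0.0/16
      defaultNetwork:
        type: OVNKubernetes
        ovnKubernetesConfig:
          mtu: 1400                  # 100 bytes below the uplink MTU
          genevePort: 6081           # documented default
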
 + +var map_NetworkStatus = map[string]string{ + "": "NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object.", +} + +func (NetworkStatus) SwaggerDoc() map[string]string { + return map_NetworkStatus +} + +var map_OVNKubernetesConfig = map[string]string{ + "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project", + "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", + "genevePort": "genevePort is the UDP port to be used by Geneve encapsulation. Default is 6081", + "hybridOverlayConfig": "HybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", + "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.", + "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.", +} + +func (OVNKubernetesConfig) SwaggerDoc() map[string]string { + return map_OVNKubernetesConfig +} + +var map_OpenShiftSDNConfig = map[string]string{ + "": "OpenShiftSDNConfig configures the three openshift-sdn plugins", + "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"", + "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.", + "mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.", + "useExternalOpenvswitch": "useExternalOpenvswitch was used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6", + "enableUnidling": "enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled.", +} + +func (OpenShiftSDNConfig) SwaggerDoc() map[string]string { + return map_OpenShiftSDNConfig +} + +var map_PolicyAuditConfig = map[string]string{ + "rateLimit": "rateLimit is the approximate maximum number of messages to generate per second per node. If unset, the default of 20 msg/sec is used.", + "maxFileSize": "maxFileSize is the max size an ACL_audit log file is allowed to reach before rotation occurs. Units are in MB and the default is 50MB.", + "destination": "destination is the location for policy log messages. 
Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/; additionally, syslog output may be configured as follows. Valid values are: - \"libc\" -> use the libc syslog() function of the host node's journald process - \"udp:host:port\" -> send syslog over UDP - \"unix:file\" -> use the UNIX domain socket directly - \"null\" -> discard all messages logged to syslog. The default is \"null\"", + "syslogFacility": "syslogFacility is the RFC5424 facility for generated messages, e.g. \"kern\". Default is \"local0\"", +} + +func (PolicyAuditConfig) SwaggerDoc() map[string]string { + return map_PolicyAuditConfig +} + +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines the configuration knobs for kubeproxy. All of these are optional and have sensible defaults", + "iptablesSyncPeriod": "An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s", + "bindAddress": "The address to \"bind\" on. Defaults to 0.0.0.0", + "proxyArguments": "Any additional arguments to pass to the kubeproxy process", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + +var map_SFlowConfig = map[string]string{ + "collectors": "sFlowCollectors is a list of strings formatted as ip:port with a maximum of ten items", +} + +func (SFlowConfig) SwaggerDoc() map[string]string { + return map_SFlowConfig +} + +var map_SimpleMacvlanConfig = map[string]string{ + "": "SimpleMacvlanConfig contains configurations for macvlan interface.", + "master": "master is the host interface to create the macvlan interface from. If not specified, it will be the default route interface", + "ipamConfig": "IPAMConfig configures the IPAM module to be used for IP Address Management (IPAM).", + "mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge", + "mtu": "mtu is the mtu to use for the macvlan interface. If unset, the host's kernel will select the value.", +} + +func (SimpleMacvlanConfig) SwaggerDoc() map[string]string { + return map_SimpleMacvlanConfig +}
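
A sketch of an additional macvlan network with static IPAM, combining SimpleMacvlanConfig with the static IPAM types documented below; the interface name, namespace, and addresses are placeholders, and the capitalized enum values (SimpleMacvlan, Static, Bridge) are assumed from the corresponding Go constants rather than from this diff:

    apiVersion: operator.openshift.io/v1
    kind: Network
    metadata:
      name: cluster
    spec:
      additionalNetworks:
      - name: macvlan-net            # illustrative name
        namespace: example           # namespace for the NetworkAttachmentDefinition
        type: SimpleMacvlan
        simpleMacvlanConfig:
          master: eth0               # host interface; defaults to the default route interface
          mode: Bridge
          ipamConfig:
            type: Static
            staticIPAMConfig:
              addresses:
              - address: 192.168.1.10/24
                gateway: 192.168.1.1
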
 + +var map_StaticIPAMAddresses = map[string]string{ + "": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses", + "address": "Address is the IP address in CIDR format", + "gateway": "Gateway is an IP inside the subnet to designate as the gateway", +} + +func (StaticIPAMAddresses) SwaggerDoc() map[string]string { + return map_StaticIPAMAddresses +} + +var map_StaticIPAMConfig = map[string]string{ + "": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)", + "addresses": "Addresses configures IP addresses for the interface", + "routes": "Routes configures IP routes for the interface", + "dns": "DNS configures DNS for the interface", +} + +func (StaticIPAMConfig) SwaggerDoc() map[string]string { + return map_StaticIPAMConfig +} + +var map_StaticIPAMDNS = map[string]string{ + "": "StaticIPAMDNS provides DNS related information for static IPAM", + "nameservers": "Nameservers points to the DNS servers to use for IP lookup", + "domain": "Domain configures the domain name used as the local domain for short hostname lookups", + "search": "Search configures priority ordered search domains for short hostname lookups", +} + +func (StaticIPAMDNS) SwaggerDoc() map[string]string { + return map_StaticIPAMDNS +} + +var map_StaticIPAMRoutes = map[string]string{ + "": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes", + "destination": "Destination is the IP route destination", + "gateway": "Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin).", +} + +func (StaticIPAMRoutes) SwaggerDoc() map[string]string { + return map_StaticIPAMRoutes +} + +var map_OpenShiftAPIServer = map[string]string{ + "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.", + "spec": "spec is the specification of the desired behavior of the OpenShift API Server.", + "status": "status defines the observed status of the OpenShift API Server.", +} + +func (OpenShiftAPIServer) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServer +} + +var map_OpenShiftAPIServerList = map[string]string{ + "": "OpenShiftAPIServerList is a collection of items", + "items": "Items contains the items", +} + +func (OpenShiftAPIServerList) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServerList +} + +var map_OpenShiftAPIServerStatus = map[string]string{ + "latestAvailableRevision": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. 
A new revision causes a new deployment of pods.", +} + +func (OpenShiftAPIServerStatus) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServerStatus +} + +var map_OpenShiftControllerManager = map[string]string{ + "": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.", +} + +func (OpenShiftControllerManager) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManager +} + +var map_OpenShiftControllerManagerList = map[string]string{ + "": "OpenShiftControllerManagerList is a collection of items", + "items": "Items contains the items", +} + +func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManagerList +} + +var map_KubeScheduler = map[string]string{ + "": "KubeScheduler provides information to configure an operator to manage scheduler.", + "spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler", + "status": "status is the most recently observed status of the Kubernetes Scheduler", +} + +func (KubeScheduler) SwaggerDoc() map[string]string { + return map_KubeScheduler +} + +var map_KubeSchedulerList = map[string]string{ + "": "KubeSchedulerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeSchedulerList) SwaggerDoc() map[string]string { + return map_KubeSchedulerList +} + +var map_ServiceCA = map[string]string{ + "": "ServiceCA provides information to configure an operator to manage the service cert controllers", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ServiceCA) SwaggerDoc() map[string]string { + return map_ServiceCA +} + +var map_ServiceCAList = map[string]string{ + "": "ServiceCAList is a collection of items", + "items": "Items contains the items", +} + +func (ServiceCAList) SwaggerDoc() map[string]string { + return map_ServiceCAList +} + +var map_ServiceCatalogAPIServer = map[string]string{ + "": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server DEPRECATED: will be removed in 4.6", +} + +func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { + return map_ServiceCatalogAPIServer +} + +var map_ServiceCatalogAPIServerList = map[string]string{ + "": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6", + "items": "Items contains the items", +} + +func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { + return map_ServiceCatalogAPIServerList +} + +var map_ServiceCatalogControllerManager = map[string]string{ + "": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager DEPRECATED: will be removed in 4.6", +} + +func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { + return map_ServiceCatalogControllerManager +} + +var map_ServiceCatalogControllerManagerList = map[string]string{ + "": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6", + "items": "Items contains the items", +} + +func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { + return map_ServiceCatalogControllerManagerList +} + +var map_Storage = map[string]string{ + "": "Storage provides a means to configure an operator to manage the cluster storage operator. 
`cluster` is the canonical name.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Storage) SwaggerDoc() map[string]string { + return map_Storage +} + +var map_StorageList = map[string]string{ + "": "StorageList contains a list of Storages.", +} + +func (StorageList) SwaggerDoc() map[string]string { + return map_StorageList +} + +var map_StorageSpec = map[string]string{ + "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", +} + +func (StorageSpec) SwaggerDoc() map[string]string { + return map_StorageSpec +} + +var map_StorageStatus = map[string]string{ + "": "StorageStatus defines the observed status of the cluster storage operator.", +} + +func (StorageStatus) SwaggerDoc() map[string]string { + return map_StorageStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/route/v1/generated.pb.go b/vendor/github.com/openshift/api/route/v1/generated.pb.go index 977fa2618d..3eba70d0d1 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/route/v1/generated.pb.go @@ -1391,10 +1391,7 @@ func (m *Route) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1606,10 +1603,7 @@ func (m *RouteIngress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1823,10 +1817,7 @@ func (m *RouteIngressCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1943,10 +1934,7 @@ func (m *RouteList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2029,10 +2017,7 @@ func (m *RoutePort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2349,10 +2334,7 @@ func (m *RouteSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2436,10 +2418,7 @@ func (m *RouteStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2573,10 +2552,7 @@ func (m *RouteTargetReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if 
(iNdEx + skippy) > l { @@ -2690,10 +2666,7 @@ func (m *RouterShard) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2935,10 +2908,7 @@ func (m *TLSConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index af3e3a6ad1..0931bdecc7 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -303,3 +303,11 @@ const ( // needs to use routes with invalid hosts. AllowNonDNSCompliantHostAnnotation = "route.openshift.io/allow-non-dns-compliant-host" ) + +// Ingress-to-route controller +const ( + // IngressToRouteIngressClassControllerName is the name of the + // controller that translates ingresses into routes. This value is + // intended to be used for the spec.controller field of ingressclasses. + IngressToRouteIngressClassControllerName = "openshift.io/ingress-to-route" +) diff --git a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml index 480f7b2730..0293bf73ad 100644 --- a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml +++ b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml @@ -1,362 +1,279 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: securitycontextconstraints.security.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: securitycontextconstraints.security.openshift.io spec: group: security.openshift.io - scope: Cluster names: kind: SecurityContextConstraints listKind: SecurityContextConstraintsList plural: securitycontextconstraints singular: securitycontextconstraints + scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: Priv - type: string - jsonPath: .allowPrivilegedContainer - description: Determines if a container can request to be run as privileged - - name: Caps - type: string - jsonPath: .allowedCapabilities - description: A list of capabilities that can be requested to add to the container - - name: SELinux - type: string - jsonPath: .seLinuxContext.type - description: Strategy that will dictate what labels will be set in the SecurityContext - - name: RunAsUser - type: string - jsonPath: .runAsUser.type - description: Strategy that will dictate what RunAsUser is used in the SecurityContext - - name: FSGroup - type: string - jsonPath: .fsGroup.type - description: Strategy that will dictate what fs group is used by the SecurityContext - - name: SupGroup - type: string - jsonPath: .supplementalGroups.type - description: Strategy that will dictate what supplemental groups are used by - the SecurityContext - - name: Priority - type: string - jsonPath: .priority - description: Sort order of 
SCCs - - name: ReadOnlyRootFS - type: string - jsonPath: .readOnlyRootFilesystem - description: Force containers to run with a read only root file system - - name: Volumes - type: string - jsonPath: .volumes - description: White list of allowed volume plugins - schema: - openAPIV3Schema: - description: SecurityContextConstraints governs the ability to make requests - that affect the SecurityContext that will be applied to a container. For - historical reasons SCC was exposed under the core Kubernetes API group. - That exposure is deprecated and will be removed in a future release - users - should instead use the security.openshift.io group to manage SecurityContextConstraints. - type: object - required: - - allowHostDirVolumePlugin - - allowHostIPC - - allowHostNetwork - - allowHostPID - - allowHostPorts - - allowPrivilegedContainer - - allowedCapabilities - - defaultAddCapabilities - - priority - - readOnlyRootFilesystem - - requiredDropCapabilities - - volumes - properties: - allowHostDirVolumePlugin: - description: AllowHostDirVolumePlugin determines if the policy allow containers - to use the HostDir volume plugin - type: boolean - allowHostIPC: - description: AllowHostIPC determines if the policy allows host ipc in - the containers. - type: boolean - allowHostNetwork: - description: AllowHostNetwork determines if the policy allows the use - of HostNetwork in the pod spec. - type: boolean - allowHostPID: - description: AllowHostPID determines if the policy allows host pid in - the containers. - type: boolean - allowHostPorts: - description: AllowHostPorts determines if the policy allows host ports - in the containers. - type: boolean - allowPrivilegeEscalation: - description: AllowPrivilegeEscalation determines if a pod can request - to allow privilege escalation. If unspecified, defaults to true. - type: boolean - nullable: true - allowPrivilegedContainer: - description: AllowPrivilegedContainer determines if a container can request - to be run as privileged. - type: boolean - allowedCapabilities: - description: AllowedCapabilities is a list of capabilities that can be - requested to add to the container. Capabilities in this field maybe - added at the pod author's discretion. You must not list a capability - in both AllowedCapabilities and RequiredDropCapabilities. To allow all - capabilities you may use '*'. 
- type: array - items: - description: Capability represent POSIX capabilities type + - additionalPrinterColumns: + - description: Determines if a container can request to be run as privileged + jsonPath: .allowPrivilegedContainer + name: Priv + type: string + - description: A list of capabilities that can be requested to add to the container + jsonPath: .allowedCapabilities + name: Caps + type: string + - description: Strategy that will dictate what labels will be set in the SecurityContext + jsonPath: .seLinuxContext.type + name: SELinux + type: string + - description: Strategy that will dictate what RunAsUser is used in the SecurityContext + jsonPath: .runAsUser.type + name: RunAsUser + type: string + - description: Strategy that will dictate what fs group is used by the SecurityContext + jsonPath: .fsGroup.type + name: FSGroup + type: string + - description: Strategy that will dictate what supplemental groups are used by the SecurityContext + jsonPath: .supplementalGroups.type + name: SupGroup + type: string + - description: Sort order of SCCs + jsonPath: .priority + name: Priority + type: string + - description: Force containers to run with a read only root file system + jsonPath: .readOnlyRootFilesystem + name: ReadOnlyRootFS + type: string + - description: White list of allowed volume plugins + jsonPath: .volumes + name: Volumes + type: string + name: v1 + schema: + openAPIV3Schema: + description: SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints. + type: object + required: + - allowHostDirVolumePlugin + - allowHostIPC + - allowHostNetwork + - allowHostPID + - allowHostPorts + - allowPrivilegedContainer + - allowedCapabilities + - defaultAddCapabilities + - priority + - readOnlyRootFilesystem + - requiredDropCapabilities + - volumes + properties: + allowHostDirVolumePlugin: + description: AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin + type: boolean + allowHostIPC: + description: AllowHostIPC determines if the policy allows host ipc in the containers. + type: boolean + allowHostNetwork: + description: AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + type: boolean + allowHostPID: + description: AllowHostPID determines if the policy allows host pid in the containers. + type: boolean + allowHostPorts: + description: AllowHostPorts determines if the policy allows host ports in the containers. + type: boolean + allowPrivilegeEscalation: + description: AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. + type: boolean + nullable: true + allowPrivilegedContainer: + description: AllowPrivilegedContainer determines if a container can request to be run as privileged. + type: boolean + allowedCapabilities: + description: AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'. 
+ type: array + items: + description: Capability represent POSIX capabilities type + type: string + nullable: true + allowedFlexVolumes: + description: AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the "Volumes" field. + type: array + items: + description: AllowedFlexVolume represents a single Flexvolume that is allowed to be used. + type: object + required: + - driver + properties: + driver: + description: Driver is the name of the Flexvolume driver. + type: string + nullable: true + allowedUnsafeSysctls: + description: "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. \n Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc." + type: array + items: + type: string + nullable: true + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string - nullable: true - allowedFlexVolumes: - description: AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty - or nil indicates that all Flexvolumes may be used. This parameter is - effective only when the usage of the Flexvolumes is allowed in the "Volumes" - field. - type: array - items: - description: AllowedFlexVolume represents a single Flexvolume that is - allowed to be used. + defaultAddCapabilities: + description: DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities. + type: array + items: + description: Capability represent POSIX capabilities type + type: string + nullable: true + defaultAllowPrivilegeEscalation: + description: DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process. + type: boolean + nullable: true + forbiddenSysctls: + description: "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. \n Examples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc." + type: array + items: + type: string + nullable: true + fsGroup: + description: FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. type: object - required: - - driver properties: - driver: - description: Driver is the name of the Flexvolume driver. + ranges: + description: Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. + type: array + items: + description: 'IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs.' 
+ type: object + properties: + max: + description: Max is the end of the range, inclusive. + type: integer + format: int64 + min: + description: Min is the start of the range, inclusive. + type: integer + format: int64 + type: + description: Type is the strategy that will dictate what FSGroup is used in the SecurityContext. type: string - nullable: true - allowedUnsafeSysctls: - description: "AllowedUnsafeSysctls is a list of explicitly allowed unsafe - sysctls, defaults to none. Each entry is either a plain sysctl name - or ends in \"*\" in which case it is considered as a prefix of allowed - sysctls. Single * means all unsafe sysctls are allowed. Kubelet has - to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - \n Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. - \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc." - type: array - items: - type: string - nullable: true - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - defaultAddCapabilities: - description: DefaultAddCapabilities is the default set of capabilities - that will be added to the container unless the pod spec specifically - drops the capability. You may not list a capabiility in both DefaultAddCapabilities - and RequiredDropCapabilities. - type: array - items: - description: Capability represent POSIX capabilities type - type: string - nullable: true - defaultAllowPrivilegeEscalation: - description: DefaultAllowPrivilegeEscalation controls the default setting - for whether a process can gain more privileges than its parent process. - type: boolean - nullable: true - forbiddenSysctls: - description: "ForbiddenSysctls is a list of explicitly forbidden sysctls, - defaults to none. Each entry is either a plain sysctl name or ends in - \"*\" in which case it is considered as a prefix of forbidden sysctls. - Single * means all sysctls are forbidden. \n Examples: e.g. \"foo/*\" - forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", - \"foo.baz\", etc." - type: array - items: - type: string - nullable: true - fsGroup: - description: FSGroup is the strategy that will dictate what fs group is - used by the SecurityContext. - type: object - properties: - ranges: - description: Ranges are the allowed ranges of fs groups. If you would - like to force a single fs group then supply a single range with - the same start and end. - type: array - items: - description: 'IDRange provides a min/max of an allowed range of - IDs. TODO: this could be reused for UIDs.' - type: object - properties: - max: - description: Max is the end of the range, inclusive. - type: integer - format: int64 - min: - description: Min is the start of the range, inclusive. - type: integer - format: int64 - type: - description: Type is the strategy that will dictate what FSGroup is - used in the SecurityContext. 
+ nullable: true + groups: + description: The groups that have permission to use this security context constraints + type: array + items: type: string - nullable: true - groups: - description: The groups that have permission to use this security context - constraints - type: array - items: - type: string - nullable: true - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - priority: - description: Priority influences the sort order of SCCs when evaluating - which SCCs to try first for a given pod request based on access in the - Users and Groups fields. The higher the int, the higher priority. An - unset value is considered a 0 priority. If scores for multiple SCCs - are equal they will be sorted from most restrictive to least restrictive. - If both priorities and restrictions are equal the SCCs will be sorted - by name. - type: integer - format: int32 - nullable: true - readOnlyRootFilesystem: - description: ReadOnlyRootFilesystem when set to true will force containers - to run with a read only root file system. If the container specifically - requests to run with a non-read only root file system the SCC should - deny the pod. If set to false the container may run with a read only - root file system if it wishes but it will not be forced to. - type: boolean - requiredDropCapabilities: - description: RequiredDropCapabilities are the capabilities that will be - dropped from the container. These are required to be dropped and cannot - be added. - type: array - items: - description: Capability represent POSIX capabilities type + nullable: true + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string - nullable: true - runAsUser: - description: RunAsUser is the strategy that will dictate what RunAsUser - is used in the SecurityContext. - type: object - properties: - type: - description: Type is the strategy that will dictate what RunAsUser - is used in the SecurityContext. - type: string - uid: - description: UID is the user id that containers must run as. Required - for the MustRunAs strategy if not using namespace/service account - allocated uids. - type: integer - format: int64 - uidRangeMax: - description: UIDRangeMax defines the max value for a strategy that - allocates by range. - type: integer - format: int64 - uidRangeMin: - description: UIDRangeMin defines the min value for a strategy that - allocates by range. - type: integer - format: int64 - nullable: true - seLinuxContext: - description: SELinuxContext is the strategy that will dictate what labels - will be set in the SecurityContext. - type: object - properties: - seLinuxOptions: - description: seLinuxOptions required to run as; required for MustRunAs - type: object - properties: - level: - description: Level is SELinux level label that applies to the - container. - type: string - role: - description: Role is a SELinux role label that applies to the - container. 
- type: string - type: - description: Type is a SELinux type label that applies to the - container. - type: string - user: - description: User is a SELinux user label that applies to the - container. - type: string - type: - description: Type is the strategy that will dictate what SELinux context - is used in the SecurityContext. + metadata: + type: object + priority: + description: Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name. + type: integer + format: int32 + nullable: true + readOnlyRootFilesystem: + description: ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to. + type: boolean + requiredDropCapabilities: + description: RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added. + type: array + items: + description: Capability represent POSIX capabilities type type: string - nullable: true - seccompProfiles: - description: "SeccompProfiles lists the allowed profiles that may be set - for the pod or container's seccomp annotations. An unset (nil) or empty - value means that no profiles may be specifid by the pod or container.\tThe - wildcard '*' may be used to allow all profiles. When used to generate - a value for a pod the first non-wildcard profile will be used as the - default." - type: array - items: - type: string - nullable: true - supplementalGroups: - description: SupplementalGroups is the strategy that will dictate what - supplemental groups are used by the SecurityContext. - type: object - properties: - ranges: - description: Ranges are the allowed ranges of supplemental groups. If - you would like to force a single supplemental group then supply - a single range with the same start and end. - type: array - items: - description: 'IDRange provides a min/max of an allowed range of - IDs. TODO: this could be reused for UIDs.' + nullable: true + runAsUser: + description: RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + type: object + properties: + type: + description: Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + type: string + uid: + description: UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids. + type: integer + format: int64 + uidRangeMax: + description: UIDRangeMax defines the max value for a strategy that allocates by range. + type: integer + format: int64 + uidRangeMin: + description: UIDRangeMin defines the min value for a strategy that allocates by range. + type: integer + format: int64 + nullable: true + seLinuxContext: + description: SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. 
+ type: object + properties: + seLinuxOptions: + description: seLinuxOptions required to run as; required for MustRunAs type: object properties: - max: - description: Max is the end of the range, inclusive. - type: integer - format: int64 - min: - description: Min is the start of the range, inclusive. - type: integer - format: int64 - type: - description: Type is the strategy that will dictate what supplemental - groups is used in the SecurityContext. + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: + description: Type is the strategy that will dictate what SELinux context is used in the SecurityContext. + type: string + nullable: true + seccompProfiles: + description: "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default." + type: array + items: type: string - nullable: true - users: - description: The users who have permissions to use this security context - constraints - type: array - items: - type: string - nullable: true - volumes: - description: Volumes is a white list of allowed volume plugins. FSType - corresponds directly with the field names of a VolumeSource (azureFile, - configMap, emptyDir). To allow all volumes you may use "*". To allow - no volumes, set to ["none"]. - type: array - items: - description: FS Type gives strong typing to different file systems that - are used by volumes. - type: string - nullable: true + nullable: true + supplementalGroups: + description: SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + type: object + properties: + ranges: + description: Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. + type: array + items: + description: 'IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs.' + type: object + properties: + max: + description: Max is the end of the range, inclusive. + type: integer + format: int64 + min: + description: Min is the start of the range, inclusive. + type: integer + format: int64 + type: + description: Type is the strategy that will dictate what supplemental groups is used in the SecurityContext. + type: string + nullable: true + users: + description: The users who have permissions to use this security context constraints + type: array + items: + type: string + nullable: true + volumes: + description: Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". To allow no volumes, set to ["none"]. + type: array + items: + description: FS Type gives strong typing to different file systems that are used by volumes. 
+ type: string + nullable: true + served: true + storage: true diff --git a/vendor/github.com/openshift/api/security/v1/generated.pb.go b/vendor/github.com/openshift/api/security/v1/generated.pb.go index 7ee402afa3..c4225bf5c0 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/security/v1/generated.pb.go @@ -2437,10 +2437,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2556,10 +2553,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2647,10 +2641,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2766,10 +2757,7 @@ func (m *PodSecurityPolicyReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2884,10 +2872,7 @@ func (m *PodSecurityPolicyReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2971,10 +2956,7 @@ func (m *PodSecurityPolicyReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3090,10 +3072,7 @@ func (m *PodSecurityPolicySelfSubjectReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3176,10 +3155,7 @@ func (m *PodSecurityPolicySelfSubjectReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3295,10 +3271,7 @@ func (m *PodSecurityPolicySubjectReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3445,10 +3418,7 @@ func (m *PodSecurityPolicySubjectReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3599,10 +3569,7 @@ func (m *PodSecurityPolicySubjectReviewStatus) Unmarshal(dAtA []byte) error { if err 
!= nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3751,10 +3718,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3871,10 +3835,7 @@ func (m *RangeAllocationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4016,10 +3977,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4137,10 +4095,7 @@ func (m *SELinuxContextStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4879,10 +4834,7 @@ func (m *SecurityContextConstraints) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4999,10 +4951,7 @@ func (m *SecurityContextConstraintsList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5117,10 +5066,7 @@ func (m *ServiceAccountPodSecurityPolicyReviewStatus) Unmarshal(dAtA []byte) err if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5236,10 +5182,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go new file mode 100644 index 0000000000..e12ae1bfca --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go @@ -0,0 +1,280 @@ +package factory + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/robfig/cron" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/v1helpers" + operatorv1helpers 
"github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// SyntheticRequeueError can be returned from sync() in case of forcing a sync() retry artificially. +// This can be also done by re-adding the key to queue, but this is cheaper and more convenient. +var SyntheticRequeueError = errors.New("synthetic requeue request") + +var defaultCacheSyncTimeout = 10 * time.Minute + +// baseController represents generic Kubernetes controller boiler-plate +type baseController struct { + name string + cachesToSync []cache.InformerSynced + sync func(ctx context.Context, controllerContext SyncContext) error + syncContext SyncContext + syncDegradedClient operatorv1helpers.OperatorClient + resyncEvery time.Duration + resyncSchedules []cron.Schedule + postStartHooks []PostStartHook + cacheSyncTimeout time.Duration +} + +var _ Controller = &baseController{} + +func (c baseController) Name() string { + return c.name +} + +type scheduledJob struct { + queue workqueue.RateLimitingInterface + name string +} + +func newScheduledJob(name string, queue workqueue.RateLimitingInterface) cron.Job { + return &scheduledJob{ + queue: queue, + name: name, + } +} + +func (s *scheduledJob) Run() { + klog.V(4).Infof("Triggering scheduled %q controller run", s.name) + s.queue.Add(DefaultQueueKey) +} + +func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { + return fmt.Errorf("unable to sync caches for %s", controllerName) + } + + klog.Infof("Caches are synced for %s ", controllerName) + + return nil +} + +func (c *baseController) Run(ctx context.Context, workers int) { + // HandleCrash recovers panics + defer utilruntime.HandleCrash(c.degradedPanicHandler) + + // give caches 10 minutes to sync + cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout) + defer cacheSyncCancel() + err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...) + if err != nil { + select { + case <-ctx.Done(): + // Exit gracefully because the controller was requested to stop. + return + default: + // If caches did not sync after 10 minutes, it has taken oddly long and + // we should provide feedback. Since the control loops will never start, + // it is safer to exit with a good message than to continue with a dead loop. + // TODO: Consider making this behavior configurable. 
+ klog.Exit(err) + } + } + + var workerWg sync.WaitGroup + defer func() { + defer klog.Infof("All %s workers have been terminated", c.name) + workerWg.Wait() + }() + + // queueContext is used to track and initiate queue shutdown + queueContext, queueContextCancel := context.WithCancel(context.TODO()) + + for i := 1; i <= workers; i++ { + klog.Infof("Starting #%d worker of %s controller ...", i, c.name) + workerWg.Add(1) + go func() { + defer func() { + klog.Infof("Shutting down worker of %s controller ...", c.name) + workerWg.Done() + }() + c.runWorker(queueContext) + }() + } + + // if a scheduled run is requested, run the cron scheduler + if c.resyncSchedules != nil { + scheduler := cron.New() + for _, s := range c.resyncSchedules { + scheduler.Schedule(s, newScheduledJob(c.name, c.syncContext.Queue())) + } + scheduler.Start() + defer scheduler.Stop() + } + + // runPeriodicalResync is independent of the queue + if c.resyncEvery > 0 { + workerWg.Add(1) + go func() { + defer workerWg.Done() + c.runPeriodicalResync(ctx, c.resyncEvery) + }() + } + + // run post-start hooks (custom triggers, etc.) + if len(c.postStartHooks) > 0 { + var hookWg sync.WaitGroup + defer func() { + hookWg.Wait() // wait for the post-start hooks + klog.Infof("All %s post start hooks have been terminated", c.name) + }() + for i := range c.postStartHooks { + hookWg.Add(1) + go func(index int) { + defer hookWg.Done() + if err := c.postStartHooks[index](ctx, c.syncContext); err != nil { + klog.Warningf("%s controller post start hook error: %v", c.name, err) + } + }(i) + } + } + + // Handle controller shutdown + + <-ctx.Done() // wait for the controller context to be cancelled + c.syncContext.Queue().ShutDown() // shut down the controller queue first + queueContextCancel() // cancel the queue context, which tells workers to initiate shutdown + + // Wait for all workers to finish their job. + // At this point Run() can hang, and the caller has to implement the logic that will kill + // this controller (SIGKILL). + klog.Infof("Shutting down %s ...", c.name) +} + +func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext) error { + return c.sync(ctx, syncCtx) +} + +func (c *baseController) runPeriodicalResync(ctx context.Context, interval time.Duration) { + if interval == 0 { + return + } + go wait.UntilWithContext(ctx, func(ctx context.Context) { + c.syncContext.Queue().Add(DefaultQueueKey) + }, interval) +} + +// runWorker runs a single worker. +// The worker is asked to terminate when the passed context is cancelled and is given terminationGraceDuration time +// to complete its shutdown. +func (c *baseController) runWorker(queueCtx context.Context) { + wait.UntilWithContext( + queueCtx, + func(queueCtx context.Context) { + defer utilruntime.HandleCrash(c.degradedPanicHandler) + for { + select { + case <-queueCtx.Done(): + return + default: + c.processNextWorkItem(queueCtx) + } + } + }, + 1*time.Second) +} + +// reconcile wraps the sync() call and, if the operator client is set, it handles the degraded condition if sync() returns an error. +func (c *baseController) reconcile(ctx context.Context, syncCtx SyncContext) error { + err := c.sync(ctx, syncCtx) + degradedErr := c.reportDegraded(ctx, err) + if apierrors.IsNotFound(degradedErr) && management.IsOperatorRemovable() { + // The operator tolerates missing CR, therefore don't report it up. + return err + } + return degradedErr +} + +// degradedPanicHandler will go degraded on failures; we catch potential panics and convert them into a bad status.
+func (c *baseController) degradedPanicHandler(panicVal interface{}) { + if c.syncDegradedClient == nil { + // if we don't have a client for reporting the degraded condition, let the existing panic handler do the work + return + } + _ = c.reportDegraded(context.TODO(), fmt.Errorf("panic caught:\n%v", panicVal)) +} + +// reportDegraded updates the operator status with an indication of degraded-ness +func (c *baseController) reportDegraded(ctx context.Context, reportedError error) error { + if c.syncDegradedClient == nil { + return reportedError + } + if reportedError != nil { + _, _, updateErr := v1helpers.UpdateStatus(c.syncDegradedClient, v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{ + Type: c.name + "Degraded", + Status: operatorv1.ConditionTrue, + Reason: "SyncError", + Message: reportedError.Error(), + })) + if updateErr != nil { + klog.Warningf("Updating status of %q failed: %v", c.Name(), updateErr) + } + return reportedError + } + _, _, updateErr := v1helpers.UpdateStatus(c.syncDegradedClient, + v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{ + Type: c.name + "Degraded", + Status: operatorv1.ConditionFalse, + Reason: "AsExpected", + })) + return updateErr +} + +func (c *baseController) processNextWorkItem(queueCtx context.Context) { + key, quit := c.syncContext.Queue().Get() + if quit { + return + } + defer c.syncContext.Queue().Done(key) + + syncCtx := c.syncContext.(syncContext) + var ok bool + syncCtx.queueKey, ok = key.(string) + if !ok { + utilruntime.HandleError(fmt.Errorf("%q controller failed to process key %q (not a string)", c.name, key)) + return + } + + if err := c.reconcile(queueCtx, syncCtx); err != nil { + if err == SyntheticRequeueError { + // logging this helps detect wedged controllers with missing prerequisites + klog.V(5).Infof("%q controller requested synthetic requeue with key %q", c.name, key) + } else { + if klog.V(4).Enabled() || key != "key" { + utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err)) + } else { + utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err)) + } + } + c.syncContext.Queue().AddRateLimited(key) + return + } + + c.syncContext.Queue().Forget(key) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go new file mode 100644 index 0000000000..901fda133e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go @@ -0,0 +1,109 @@ +package factory + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + "github.com/openshift/library-go/pkg/operator/events" +) + +// syncContext implements SyncContext and provides user access to the queue and the object that caused +// the sync to be triggered. +type syncContext struct { + eventRecorder events.Recorder + queue workqueue.RateLimitingInterface + queueKey string +} + +var _ SyncContext = syncContext{} + +// NewSyncContext returns a new sync context.
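// A minimal construction sketch, e.g. for a unit test (an editor's illustration,
// not part of the vendored file; it assumes the events package's in-memory
// recorder helper, and the controller name is arbitrary):
//
//	recorder := events.NewInMemoryRecorder("example")
//	syncCtx := NewSyncContext("ExampleController", recorder)
//	syncCtx.Queue().Add(DefaultQueueKey) // manually trigger one sync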
+func NewSyncContext(name string, recorder events.Recorder) SyncContext { + return syncContext{ + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + eventRecorder: recorder.WithComponentSuffix(strings.ToLower(name)), + } +} + +func (c syncContext) Queue() workqueue.RateLimitingInterface { + return c.queue +} + +func (c syncContext) QueueKey() string { + return c.queueKey +} + +func (c syncContext) Recorder() events.Recorder { + return c.eventRecorder +} + +// eventHandler provides the default event handler that is added to informers passed to the controller factory. +func (c syncContext) eventHandler(queueKeyFunc ObjectQueueKeyFunc, filter EventFilterFunc) cache.ResourceEventHandler { + resourceEventHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + runtimeObj, ok := obj.(runtime.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("added object %+v is not runtime Object", obj)) + return + } + c.Queue().Add(queueKeyFunc(runtimeObj)) + }, + UpdateFunc: func(old, new interface{}) { + runtimeObj, ok := new.(runtime.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", new)) + return + } + c.Queue().Add(queueKeyFunc(runtimeObj)) + }, + DeleteFunc: func(obj interface{}) { + runtimeObj, ok := obj.(runtime.Object) + if !ok { + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + c.Queue().Add(queueKeyFunc(tombstone.Obj.(runtime.Object))) + return + } + utilruntime.HandleError(fmt.Errorf("deleted object %+v is not runtime Object", obj)) + return + } + c.Queue().Add(queueKeyFunc(runtimeObj)) + }, + } + if filter == nil { + return resourceEventHandler + } + return cache.FilteringResourceEventHandler{ + FilterFunc: filter, + Handler: resourceEventHandler, + } +} + +// namespaceChecker returns a function which returns true if an input object +// (or its tombstone) is a namespace whose name matches one of the namespaces +// that we are interested in. +func namespaceChecker(interestingNamespaces []string) func(obj interface{}) bool { + interestingNamespacesSet := sets.NewString(interestingNamespaces...) + + return func(obj interface{}) bool { + ns, ok := obj.(*corev1.Namespace) + if ok { + return interestingNamespacesSet.Has(ns.Name) + } + + // the object might be getting deleted + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if ok { + if ns, ok := tombstone.Obj.(*corev1.Namespace); ok { + return interestingNamespacesSet.Has(ns.Name) + } + } + return false + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go new file mode 100644 index 0000000000..b70da95481 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go @@ -0,0 +1,26 @@ +package factory + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// ObjectNameToKey returns the object's name for use as a work queue key. +func ObjectNameToKey(obj runtime.Object) string { + metaObj, ok := obj.(metav1.ObjectMetaAccessor) + if !ok { + return "" + } + return metaObj.GetObjectMeta().GetName() +} + +// NamesFilter returns an event filter that matches only objects with one of the given names. +func NamesFilter(names ...string) EventFilterFunc { + nameSet := sets.NewString(names...)
+ return func(obj interface{}) bool { + metaObj, ok := obj.(metav1.ObjectMetaAccessor) + if !ok { + return false + } + return nameSet.Has(metaObj.GetObjectMeta().GetName()) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go new file mode 100644 index 0000000000..09ce3abdb2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go @@ -0,0 +1,276 @@ +package factory + +import ( + "context" + "fmt" + "time" + + "github.com/robfig/cron" + "k8s.io/apimachinery/pkg/runtime" + errorutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + + "github.com/openshift/library-go/pkg/operator/events" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// DefaultQueueKey is the queue key used for string-trigger-based controllers. +const DefaultQueueKey = "key" + +// Factory is a generator that produces standard Kubernetes controllers. +// Factory is generic and should only be used for simple controllers that do not require anything special. +type Factory struct { + sync SyncFunc + syncContext SyncContext + syncDegradedClient operatorv1helpers.OperatorClient + resyncInterval time.Duration + resyncSchedules []string + informers []filteredInformers + informerQueueKeys []informersWithQueueKey + bareInformers []Informer + postStartHooks []PostStartHook + namespaceInformers []*namespaceInformer + cachesToSync []cache.InformerSynced + interestingNamespaces sets.String +} + +// Informer represents any structure that allows registering event handlers and reports whether its caches are synced. +// Any SharedInformer will comply. +type Informer interface { + AddEventHandler(handler cache.ResourceEventHandler) + HasSynced() bool +} + +type namespaceInformer struct { + informer Informer + nsFilter EventFilterFunc +} + +type informersWithQueueKey struct { + informers []Informer + filter EventFilterFunc + queueKeyFn ObjectQueueKeyFunc +} + +type filteredInformers struct { + informers []Informer + filter EventFilterFunc +} + +// PostStartHook specifies a function that will run after the controller is started. +// The context is cancelled when the controller is asked to shut down, and the post start hook should terminate as well. +// The syncContext allows access to the controller queue and event recorder. +type PostStartHook func(ctx context.Context, syncContext SyncContext) error + +// ObjectQueueKeyFunc is used to make a string work queue key out of the runtime object that is passed to it. +// This can extract the "namespace/name" if you need to, or just return "key" if you are building a controller that only uses string +// triggers. +type ObjectQueueKeyFunc func(runtime.Object) string + +// EventFilterFunc is used to filter informer events to prevent Sync() from being called. +type EventFilterFunc func(obj interface{}) bool + +// New returns a new factory instance. +func New() *Factory { + return &Factory{} +} + +// WithSync is used to set the controller synchronization function. This function is the core of the controller and +// usually holds the main controller logic. +func (f *Factory) WithSync(syncFn SyncFunc) *Factory { + f.sync = syncFn + return f +} + +// WithInformers is used to register event handlers and collect the cache sync functions. +// Pass the informers you want to use to react to changes on resources. If an informer event is observed, then the Sync() function +// is called.
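// A minimal usage sketch (an editor's illustration, not part of the vendored
// file; notReadyYet, configMapInformer and recorder are hypothetical, and the
// sync function shows how SyntheticRequeueError forces a cheap retry):
//
//	sync := func(ctx context.Context, syncCtx factory.SyncContext) error {
//		if notReadyYet() {
//			return factory.SyntheticRequeueError // cheaper than re-adding the key yourself
//		}
//		return nil
//	}
//	controller := factory.New().
//		WithSync(sync).
//		WithInformers(configMapInformer).
//		ToController("ExampleController", recorder)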
+func (f *Factory) WithInformers(informers ...Informer) *Factory { + f.WithFilteredEventsInformers(nil, informers...) + return f +} + +// WithFilteredEventsInformers is used to register event handlers and collect the cache sync functions. +// Pass the informers you want to use to react to changes on resources. If an informer event is observed, then the Sync() function +// is called. +// Pass a filter to filter out events that should not trigger a Sync() call. +func (f *Factory) WithFilteredEventsInformers(filter EventFilterFunc, informers ...Informer) *Factory { + f.informers = append(f.informers, filteredInformers{ + informers: informers, + filter: filter, + }) + return f +} + +// WithBareInformers allows registering informers that already have custom event handlers registered; no additional +// event handlers will be added to these informers. +// The controller will wait for the caches of these informers to be synced. +// The existing event handlers will have to respect the queue key function, or the sync() implementation will have to +// account for custom queue keys. +func (f *Factory) WithBareInformers(informers ...Informer) *Factory { + f.bareInformers = append(f.bareInformers, informers...) + return f +} + +// WithInformersQueueKeyFunc is used to register event handlers and collect the cache sync functions. +// Pass the informers you want to use to react to changes on resources. If an informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into the string key used by the work queue. +func (f *Factory) WithInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithFilteredEventsInformersQueueKeyFunc is used to register event handlers and collect the cache sync functions. +// Pass the informers you want to use to react to changes on resources. If an informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into the string key used by the work queue. +// Pass a filter to filter out events that should not trigger a Sync() call. +func (f *Factory) WithFilteredEventsInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, filter EventFilterFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + filter: filter, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithPostStartHooks allows registering functions that will run asynchronously after the controller is started via Run(). +func (f *Factory) WithPostStartHooks(hooks ...PostStartHook) *Factory { + f.postStartHooks = append(f.postStartHooks, hooks...) + return f +} + +// WithNamespaceInformer is used to register event handlers and collect the cache sync functions. +// The sync function will only be triggered when the object observed by this informer is a namespace whose name matches one of the interestingNamespaces. +// Do not use this to register non-namespace informers.
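// For example (an editor's illustration, not part of the vendored file;
// sync, namespaceInformer and recorder are hypothetical):
//
//	factory.New().
//		WithSync(sync).
//		WithNamespaceInformer(namespaceInformer, "openshift-monitoring").
//		ToController("NamespaceWatcher", recorder)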
+func (f *Factory) WithNamespaceInformer(informer Informer, interestingNamespaces ...string) *Factory { + f.namespaceInformers = append(f.namespaceInformers, &namespaceInformer{ + informer: informer, + nsFilter: namespaceChecker(interestingNamespaces), + }) + return f +} + +// ResyncEvery will cause the Sync() function to be called periodically, regardless of informers. +// This is useful when you want to refresh every N minutes or you fear that your informers can get stuck. +// If this is not called, no periodical resync will happen. +// Note: The controller context passed to the Sync() function in this case does not contain the object metadata or the object itself. +// This can be used to detect periodical resyncs, but a normal Sync() has to be cautious about `nil` objects. +func (f *Factory) ResyncEvery(interval time.Duration) *Factory { + f.resyncInterval = interval + return f +} + +// ResyncSchedule allows supplying cron-syntax schedules that will be used to schedule the sync() calls. +// This allows more fine-tuned controller scheduling than ResyncEvery. +// Examples: +// +// factory.New().ResyncSchedule("@every 1s").ToController() // Every second +// factory.New().ResyncSchedule("@hourly").ToController() // Every hour +// factory.New().ResyncSchedule("30 * * * *").ToController() // Every hour on the half hour +// +// Note: The controller context passed to the Sync() function in this case does not contain the object metadata or the object itself. +// This can be used to detect periodical resyncs, but a normal Sync() has to be cautious about `nil` objects. +func (f *Factory) ResyncSchedule(schedules ...string) *Factory { + f.resyncSchedules = append(f.resyncSchedules, schedules...) + return f +} + +// WithSyncContext allows specifying a custom, existing sync context for this factory. +// This is useful during unit testing, where you can override the default event recorder or mock the runtime objects. +// If this function is not called, a SyncContext is created by the factory automatically. +func (f *Factory) WithSyncContext(ctx SyncContext) *Factory { + f.syncContext = ctx + return f +} + +// WithSyncDegradedOnError wraps the controller sync() function so that when it returns an error, the operator client +// is used to set the degraded condition (eg. "ControllerFooDegraded"). The degraded condition name is derived from the controller name. +func (f *Factory) WithSyncDegradedOnError(operatorClient operatorv1helpers.OperatorClient) *Factory { + f.syncDegradedClient = operatorClient + return f +} + +// ToController produces a runnable controller.
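// A fuller builder sketch using only methods defined in this file (an
// editor's illustration, not part of the vendored file; sync, informer,
// operatorClient and recorder are hypothetical placeholders):
//
//	c := factory.New().
//		WithSync(sync).
//		WithInformers(informer).
//		ResyncEvery(10*time.Minute).
//		WithSyncDegradedOnError(operatorClient).
//		ToController("ExampleController", recorder)
//	go c.Run(ctx, 1) // one worker; more workers parallelize Sync()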
+func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller { + if f.sync == nil { + panic(fmt.Errorf("WithSync() must be used before calling ToController() in %q", name)) + } + + var ctx SyncContext + if f.syncContext != nil { + ctx = f.syncContext + } else { + ctx = NewSyncContext(name, eventRecorder) + } + + var cronSchedules []cron.Schedule + if len(f.resyncSchedules) > 0 { + var errors []error + for _, schedule := range f.resyncSchedules { + if s, err := cron.ParseStandard(schedule); err != nil { + errors = append(errors, err) + } else { + cronSchedules = append(cronSchedules, s) + } + } + if err := errorutil.NewAggregate(errors); err != nil { + panic(fmt.Errorf("failed to parse controller schedules for %q: %v", name, err)) + } + } + + c := &baseController{ + name: name, + syncDegradedClient: f.syncDegradedClient, + sync: f.sync, + resyncEvery: f.resyncInterval, + resyncSchedules: cronSchedules, + cachesToSync: append([]cache.InformerSynced{}, f.cachesToSync...), + syncContext: ctx, + postStartHooks: f.postStartHooks, + cacheSyncTimeout: defaultCacheSyncTimeout, + } + + // Warn about too fast resyncs as they might drain the operator's QPS. + // This event is cheap as it is only emitted on operator startup. + if c.resyncEvery > 0 && c.resyncEvery.Seconds() < 60 { + ctx.Recorder().Warningf("FastControllerResync", "Controller %q resync interval is set to %s which might lead to client request throttling", name, c.resyncEvery) + } + + for i := range f.informerQueueKeys { + for d := range f.informerQueueKeys[i].informers { + informer := f.informerQueueKeys[i].informers[d] + queueKeyFn := f.informerQueueKeys[i].queueKeyFn + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter)) + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.informers { + for d := range f.informers[i].informers { + informer := f.informers[i].informers[d] + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(func(runtime.Object) string { + return DefaultQueueKey + }, f.informers[i].filter)) + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.bareInformers { + c.cachesToSync = append(c.cachesToSync, f.bareInformers[i].HasSynced) + } + + for i := range f.namespaceInformers { + f.namespaceInformers[i].informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(func(runtime.Object) string { + return DefaultQueueKey + }, f.namespaceInformers[i].nsFilter)) + c.cachesToSync = append(c.cachesToSync, f.namespaceInformers[i].informer.HasSynced) + } + + return c +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go new file mode 100644 index 0000000000..0ef98c6701 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go @@ -0,0 +1,47 @@ +package factory + +import ( + "context", + +) 
+	// This function will return when all internal loops are finished.
+	// Note that having more than one worker usually means handling parallelization of Sync().
+	Run(ctx context.Context, workers int)
+
+	// Sync contains the main controller logic.
+	// It should not be called directly, but it can be used in unit tests to exercise the sync.
+	Sync(ctx context.Context, controllerContext SyncContext) error
+
+	// Name returns the controller name string.
+	Name() string
+}
+
+// SyncContext interface represents a context given to the Sync() function where the main controller logic happens.
+// SyncContext exposes the controller name and gives the user access to the queue (for manual requeue).
+// SyncContext also provides metadata about the object that informers observed as changed.
+type SyncContext interface {
+	// Queue gives access to the controller queue. This can be used for manual requeue, although if a Sync() function returns
+	// an error, the object is automatically re-queued. Use with caution.
+	Queue() workqueue.RateLimitingInterface
+
+	// QueueKey represents the queue key passed to the Sync function.
+	QueueKey() string
+
+	// Recorder provides access to the event recorder.
+	Recorder() events.Recorder
+}
+
+// SyncFunc is a function that contains the main controller logic.
+// The ctx passed is the main controller context; when it is cancelled, the controller is being shut down.
+// The syncContext provides access to the controller name, queue and event recorder.
+type SyncFunc func(ctx context.Context, controllerContext SyncContext) error
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/README.md b/vendor/github.com/openshift/library-go/pkg/operator/csr/README.md
new file mode 100644
index 0000000000..9802105c73
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/README.md
@@ -0,0 +1,16 @@
+You usually want to start with NewSimpleClientCertificateController.
+
+This package provides a control loop which takes as input
+1. the target secret name
+2. the cert common name
+3. the desired validity (recall that the signing cert can sign for less)
+
+The flow goes like this:
+1. if the secret contains a valid client cert good for at least five days or 50% of its validity, do nothing. If not...
+2. create a new cert/key pair in memory
+3. create a CSR in the API
+4. watch the CSR in the API until it is approved or denied
+5. if denied, write degraded status and return
+6. if approved, update the secret
+
+The secrets have annotations which match our other cert rotation secrets.
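+
+A minimal wiring sketch (the informer factory, client, recorder and the concrete
+names below are illustrative assumptions, not prescribed by this package):
+
+    controller := csr.NewSimpleClientCertificateController(
+        "openshift-example", "example-client-certs", // secret namespace/name
+        "system:serviceaccount:openshift-example:example-sa", // cert common name
+        nil, // additional groups
+        kubeInformers, kubeClient, recorder,
+    )
+    go controller.Run(ctx, 1)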
\ No newline at end of file
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/cert_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/csr/cert_controller.go
new file mode 100644
index 0000000000..f2bb06ec70
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/cert_controller.go
@@ -0,0 +1,322 @@
+package csr
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509/pkix"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+
+	certificates "k8s.io/api/certificates/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	certificatesinformers "k8s.io/client-go/informers/certificates/v1"
+	corev1informers "k8s.io/client-go/informers/core/v1"
+	csrclient "k8s.io/client-go/kubernetes/typed/certificates/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	certificateslisters "k8s.io/client-go/listers/certificates/v1"
+	cache "k8s.io/client-go/tools/cache"
+	certutil "k8s.io/client-go/util/cert"
+	"k8s.io/client-go/util/keyutil"
+	"k8s.io/klog/v2"
+)
+
+const (
+	// TLSKeyFile is the name of the TLS key file in the client certificate secret
+	TLSKeyFile = "tls.key"
+	// TLSCertFile is the name of the TLS cert file in the client certificate secret
+	TLSCertFile = "tls.crt"
+)
+
+// ControllerResyncInterval is exposed so that integration tests can crank up the controller sync speed.
+var ControllerResyncInterval = 5 * time.Minute
+
+// CSROption includes options that are used to create and monitor CSRs.
+type CSROption struct {
+	// ObjectMeta is the ObjectMeta shared by all created CSRs. It should use GenerateName instead of Name
+	// to generate random CSR names.
+	ObjectMeta metav1.ObjectMeta
+	// Subject represents the subject of the client certificate used to create CSRs.
+	Subject *pkix.Name
+	// DNSNames represents the DNS names used to create the client certificate.
+	DNSNames []string
+	// SignerName is the name of the signer specified in the created CSRs.
+	SignerName string
+
+	// EventFilterFunc matches CSRs created with the above options.
+	EventFilterFunc factory.EventFilterFunc
+}
+
+// ClientCertOption includes options that are used to create the client certificate.
+type ClientCertOption struct {
+	// SecretNamespace is the namespace of the secret containing the client certificate.
+	SecretNamespace string
+	// SecretName is the name of the secret containing the client certificate. The secret will be created if
+	// it does not exist.
+	SecretName string
+	// AdditonalSecretData contains data that will be added to the client certificate secret besides tls.key/tls.crt.
+	AdditonalSecretData map[string][]byte
+}
+
+// clientCertificateController implements the common logic of hub client certificate creation/rotation. It
+// creates a client certificate and rotates it before it expires by using CSRs. The generated client
+// certificate is stored in a specific secret with the keys below:
+// 1) tls.key: the TLS key file
+// 2) tls.crt: the TLS cert file
+type clientCertificateController struct {
+	ClientCertOption
+	CSROption
+
+	hubCSRLister    certificateslisters.CertificateSigningRequestLister
+	hubCSRClient    csrclient.CertificateSigningRequestInterface
+	spokeCoreClient corev1client.CoreV1Interface
+	controllerName  string
+
+	// csrName is the name of the CSR created by the controller that is waiting for approval.
+	csrName string
+
+	// keyData is the private key data used to create a CSR.
+	// csrName and keyData store the internal state of the controller. They are set after the controller creates a new CSR
+	// and cleared once the CSR is approved and processed by the controller. There are 4 combinations of their values:
+	//   1. csrName empty, keyData empty: we aren't trying to create a new client cert; the current one is valid
+	//   2. csrName set, keyData empty: this should not happen; it indicates a bug
+	//   3. csrName set, keyData set: we are waiting for a new cert to be signed
+	//   4. csrName empty, keyData set: the CSR failed to be created; this should not happen either, it is a bug
+	keyData []byte
+}
+
+// NewClientCertificateController returns an instance of clientCertificateController
+func NewClientCertificateController(
+	clientCertOption ClientCertOption,
+	csrOption CSROption,
+	hubCSRInformer certificatesinformers.CertificateSigningRequestInformer,
+	hubCSRClient csrclient.CertificateSigningRequestInterface,
+	spokeSecretInformer corev1informers.SecretInformer,
+	spokeCoreClient corev1client.CoreV1Interface,
+	recorder events.Recorder,
+	controllerName string,
+) factory.Controller {
+	c := clientCertificateController{
+		ClientCertOption: clientCertOption,
+		CSROption:        csrOption,
+		hubCSRLister:     hubCSRInformer.Lister(),
+		hubCSRClient:     hubCSRClient,
+		spokeCoreClient:  spokeCoreClient,
+		controllerName:   controllerName,
+	}
+
+	return factory.New().
+		WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string {
+			key, _ := cache.MetaNamespaceKeyFunc(obj)
+			return key
+		}, func(obj interface{}) bool {
+			accessor, err := meta.Accessor(obj)
+			if err != nil {
+				return false
+			}
+			// only enqueue the specific secret this controller manages
+			if accessor.GetNamespace() == c.SecretNamespace && accessor.GetName() == c.SecretName {
+				return true
+			}
+			return false
+		}, spokeSecretInformer.Informer()).
+		WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string {
+			accessor, _ := meta.Accessor(obj)
+			return accessor.GetName()
+		}, c.EventFilterFunc, hubCSRInformer.Informer()).
+		WithSync(c.sync).
+		ResyncEvery(ControllerResyncInterval).
+		ToController(controllerName, recorder)
+}
+
+func (c *clientCertificateController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
+	// get the secret containing the client certificate
+	secret, err := c.spokeCoreClient.Secrets(c.SecretNamespace).Get(ctx, c.SecretName, metav1.GetOptions{})
+	switch {
+	case errors.IsNotFound(err):
+		secret = &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: c.SecretNamespace,
+				Name:      c.SecretName,
+			},
+		}
+	case err != nil:
+		return fmt.Errorf("unable to get secret %q: %w", c.SecretNamespace+"/"+c.SecretName, err)
+	}
+
+	// reconcile the pending CSR if one exists
+	if len(c.csrName) > 0 {
+		newSecretConfig, err := c.syncCSR(secret)
+		if err != nil {
+			c.reset()
+			return err
+		}
+		if len(newSecretConfig) == 0 {
+			return nil
+		}
+		// append additional data into the client certificate secret
+		for k, v := range c.AdditonalSecretData {
+			newSecretConfig[k] = v
+		}
+		secret.Data = newSecretConfig
+		// save the changes into the secret
+		if err := c.saveSecret(secret); err != nil {
+			return err
+		}
+		syncCtx.Recorder().Eventf("ClientCertificateCreated", "A new client certificate for %s is available", c.controllerName)
+		c.reset()
+		return nil
+	}
+
+	// create a CSR to request a new client certificate if
+	//   a. there is no valid client certificate issued for the current cluster/agent
+	//   b. the client certificate exists but has less than a randomized threshold (between 20% and 25%) of its lifetime remaining
+	if c.hasValidClientCertificate(secret) {
+		notBefore, notAfter, err := getCertValidityPeriod(secret)
+		if err != nil {
+			return err
+		}
+
+		total := notAfter.Sub(*notBefore)
+		remaining := time.Until(*notAfter)
+		klog.V(4).Infof("Client certificate for %s: time total=%v, remaining=%v, remaining/total=%v", c.controllerName, total, remaining, remaining.Seconds()/total.Seconds())
+		threshold := jitter(0.2, 0.25)
+		if remaining.Seconds()/total.Seconds() > threshold {
+			// Do nothing if the client certificate is valid and has more than the randomized threshold (between 20% and 25%) of its lifetime remaining
+			klog.V(4).Infof("Client certificate for %s is valid and has more than %.2f%% of its life remaining", c.controllerName, threshold*100)
+			return nil
+		}
+		syncCtx.Recorder().Eventf("CertificateRotationStarted", "The current client certificate for %s expires in %v. Start certificate rotation", c.controllerName, remaining.Round(time.Second))
+	} else {
+		syncCtx.Recorder().Eventf("NoValidCertificateFound", "No valid client certificate for %s is found. Bootstrap is required", c.controllerName)
+	}
+
+	// create a new private key
+	c.keyData, err = keyutil.MakeEllipticPrivateKeyPEM()
+	if err != nil {
+		return err
+	}
+
+	// create a CSR
+	c.csrName, err = c.createCSR(ctx)
+	if err != nil {
+		c.reset()
+		return err
+	}
+	syncCtx.Recorder().Eventf("CSRCreated", "A csr %q is created for %s", c.csrName, c.controllerName)
+	return nil
+}
+
+func (c *clientCertificateController) syncCSR(secret *corev1.Secret) (map[string][]byte, error) {
+	// skip if there is no ongoing CSR
+	if len(c.csrName) == 0 {
+		return nil, fmt.Errorf("no ongoing csr")
+	}
+
+	// get the CSR; it may no longer exist
+	csr, err := c.hubCSRLister.Get(c.csrName)
+	switch {
+	case errors.IsNotFound(err):
+		// fall back to fetching the CSR from the hub apiserver in case it is not cached by the informer yet
+		csr, err = c.hubCSRClient.Get(context.Background(), c.csrName, metav1.GetOptions{})
+		if errors.IsNotFound(err) {
+			return nil, fmt.Errorf("unable to get csr %q; it might have already been deleted", c.csrName)
+		}
+	case err != nil:
+		return nil, err
+	}
+
+	// skip if the CSR is not approved yet
+	if !isCSRApproved(csr) {
+		return nil, nil
+	}
+
+	// skip if the CSR has no certificate in its status yet
+	if len(csr.Status.Certificate) == 0 {
+		return nil, nil
+	}
+
+	klog.V(4).Infof("Sync csr %v", c.csrName)
+	// check that the cert in the CSR status matches the corresponding private key
+	if c.keyData == nil {
+		return nil, fmt.Errorf("no private key found for certificate in csr: %s", c.csrName)
+	}
+	_, err = tls.X509KeyPair(csr.Status.Certificate, c.keyData)
+	if err != nil {
+		return nil, fmt.Errorf("private key does not match the certificate in csr: %s", c.csrName)
+	}
+
+	data := map[string][]byte{
+		TLSCertFile: csr.Status.Certificate,
+		TLSKeyFile:  c.keyData,
+	}
+
+	return data, nil
+}
+
+func (c *clientCertificateController) createCSR(ctx context.Context) (string, error) {
+	privateKey, err := keyutil.ParsePrivateKeyPEM(c.keyData)
+	if err != nil {
+		return "", fmt.Errorf("invalid private key for certificate request: %w", err)
+	}
+	csrData, err := certutil.MakeCSR(privateKey, c.Subject, c.DNSNames, nil)
+	if err != nil {
+		return "", fmt.Errorf("unable to generate certificate request: %w", err)
+	}
+
+	csr := &certificates.CertificateSigningRequest{
+		ObjectMeta: c.ObjectMeta,
+		Spec: certificates.CertificateSigningRequestSpec{
+			Request: csrData,
+			Usages: []certificates.KeyUsage{
+				certificates.UsageDigitalSignature,
+				certificates.UsageKeyEncipherment,
+				certificates.UsageClientAuth,
+			},
+			SignerName: c.SignerName,
+		},
+	}
+
+	req, err := c.hubCSRClient.Create(ctx, csr, metav1.CreateOptions{})
+	if err != nil {
+		return "", err
+	}
+	return req.Name, nil
+}
+
+func (c *clientCertificateController) saveSecret(secret *corev1.Secret) error {
+	var err error
+	if secret.ResourceVersion == "" {
+		_, err = c.spokeCoreClient.Secrets(c.SecretNamespace).Create(context.Background(), secret, metav1.CreateOptions{})
+		return err
+	}
+	_, err = c.spokeCoreClient.Secrets(c.SecretNamespace).Update(context.Background(), secret, metav1.UpdateOptions{})
+	return err
+}
+
+func (c *clientCertificateController) reset() {
+	c.csrName = ""
+	c.keyData = nil
+}
+
+func (c *clientCertificateController) hasValidClientCertificate(secret *corev1.Secret) bool {
+	if valid, err := IsCertificateValid(secret.Data[TLSCertFile], c.Subject); err == nil {
+		return valid
+	}
+	return false
+}
+
+func jitter(percentage float64, maxFactor float64) float64 {
+	if maxFactor <= 0.0 {
+		maxFactor = 1.0
+	}
+	newPercentage := percentage + percentage*rand.Float64()*maxFactor
+	return newPercentage
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/certificate.go b/vendor/github.com/openshift/library-go/pkg/operator/csr/certificate.go
new file mode 100644
index 0000000000..1aeb661454
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/certificate.go
@@ -0,0 +1,135 @@
+package csr
+
+import (
+	"crypto/x509/pkix"
+	"errors"
+	"fmt"
+	"time"
+
+	certificatesv1 "k8s.io/api/certificates/v1"
+	corev1 "k8s.io/api/core/v1"
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	certutil "k8s.io/client-go/util/cert"
+	"k8s.io/klog/v2"
+)
+
+// IsCertificateValid returns true if
+// 1) no cert in the client certificate has expired, and
+// 2) at least one cert matches the given subject, if specified.
+func IsCertificateValid(certData []byte, subject *pkix.Name) (bool, error) {
+	certs, err := certutil.ParseCertsPEM(certData)
+	if err != nil {
+		return false, fmt.Errorf("unable to parse certificate: %w", err)
+	}
+
+	if len(certs) == 0 {
+		return false, errors.New("no cert found in certificate")
+	}
+
+	now := time.Now()
+	// make sure no cert in the certificate chain has expired
+	for _, cert := range certs {
+		if now.After(cert.NotAfter) {
+			klog.V(4).Infof("Part of the certificate is expired: %v", cert.NotAfter)
+			return false, nil
+		}
+	}
+
+	if subject == nil {
+		return true, nil
+	}
+
+	// check the subject of the certificates
+	for _, cert := range certs {
+		if cert.Subject.CommonName != subject.CommonName {
+			continue
+		}
+		return true, nil
+	}
+
+	klog.V(4).Infof("Certificate is not issued for subject (cn=%s)", subject.CommonName)
+	return false, nil
+}
+
+// getCertValidityPeriod returns the validity period of the client certificate in the secret
+func getCertValidityPeriod(secret *corev1.Secret) (*time.Time, *time.Time, error) {
+	if secret.Data == nil {
+		return nil, nil, fmt.Errorf("no client certificate found in secret %q", secret.Namespace+"/"+secret.Name)
+	}
+
+	certData, ok := secret.Data[TLSCertFile]
+	if !ok {
+		return nil, nil, fmt.Errorf("no client certificate found in secret %q", secret.Namespace+"/"+secret.Name)
+	}
+
+	certs, err := certutil.ParseCertsPEM(certData)
+	if err != nil {
+		return nil, nil, fmt.Errorf("unable to parse TLS certificates: %w", err)
+	}
+
+	if len(certs) == 0 {
+		return nil, nil, errors.New("no cert found in certificate")
+	}
+
+	// compute the intersection of the validity periods of all certs in the chain
+	// (the latest NotBefore and the earliest NotAfter)
+	var notBefore, notAfter *time.Time
+	for index, cert := range certs {
+		if index == 0 {
+			notBefore = &cert.NotBefore
+			notAfter = &cert.NotAfter
+			continue
+		}
+
+		if notBefore.Before(cert.NotBefore) {
+			notBefore = &cert.NotBefore
+		}
+
+		if notAfter.After(cert.NotAfter) {
+			notAfter = &cert.NotAfter
+		}
+	}
+
+	return notBefore, notAfter, nil
+}
+
+// BuildKubeconfig builds a kubeconfig based on a rest config template with a cert/key pair
+func BuildKubeconfig(clientConfig *restclient.Config, certPath, keyPath string) clientcmdapi.Config {
+	// Build kubeconfig.
+	kubeconfig := clientcmdapi.Config{
+		// Define a cluster stanza based on the bootstrap kubeconfig.
+		Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": {
+			Server:                   clientConfig.Host,
+			InsecureSkipTLSVerify:    false,
+			CertificateAuthorityData: clientConfig.CAData,
+		}},
+		// Define auth based on the obtained client cert.
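+		// Referencing the cert and key by file path (instead of embedding the bytes)
+		// lets a rotated pair on disk be picked up the next time the kubeconfig is read.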
+		AuthInfos: map[string]*clientcmdapi.AuthInfo{"default-auth": {
+			ClientCertificate: certPath,
+			ClientKey:         keyPath,
+		}},
+		// Define a context that connects the auth info and cluster, and set it as the default.
+		Contexts: map[string]*clientcmdapi.Context{"default-context": {
+			Cluster:   "default-cluster",
+			AuthInfo:  "default-auth",
+			Namespace: "configuration",
+		}},
+		CurrentContext: "default-context",
+	}
+
+	return kubeconfig
+}
+
+// isCSRApproved returns true if the given CSR has been approved
+func isCSRApproved(csr *certificatesv1.CertificateSigningRequest) bool {
+	approved := false
+	for _, condition := range csr.Status.Conditions {
+		if condition.Type == certificatesv1.CertificateDenied {
+			return false
+		} else if condition.Type == certificatesv1.CertificateApproved {
+			approved = true
+		}
+	}
+
+	return approved
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/csr_approver.go b/vendor/github.com/openshift/library-go/pkg/operator/csr/csr_approver.go
new file mode 100644
index 0000000000..036bf54cbe
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/csr_approver.go
@@ -0,0 +1,281 @@
+package csr
+
+import (
+	"context"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+
+	certapiv1 "k8s.io/api/certificates/v1"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apiserver/pkg/authentication/serviceaccount"
+	certv1informers "k8s.io/client-go/informers/certificates/v1"
+	certv1client "k8s.io/client-go/kubernetes/typed/certificates/v1"
+	certv1listers "k8s.io/client-go/listers/certificates/v1"
+	"k8s.io/klog/v2"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+type CSRApprovalDecision string
+
+const (
+	CSRApproved  CSRApprovalDecision = "Approved"
+	CSRDenied    CSRApprovalDecision = "Denied"
+	CSRNoOpinion CSRApprovalDecision = "NoOpinion"
+)
+
+type CSRApprover interface {
+	Approve(csrObj *certapiv1.CertificateSigningRequest, x509CSR *x509.CertificateRequest) (approvalStatus CSRApprovalDecision, denyReason string, err error)
+}
+
+type csrApproverController struct {
+	csrClient certv1client.CertificateSigningRequestInterface
+	csrLister certv1listers.CertificateSigningRequestLister
+
+	csrApprover CSRApprover
+}
+
+// NewCSRApproverController returns a controller that observes the CSR API
+// for CSRs matching the given filter. For each such CSR, it runs `csrApprover.Approve()`
+// and either denies, approves, or leaves the CSR untouched.
+//
+// If operatorClient is nil, the controller will log the errors instead of reporting
+// them in an operator status.
+func NewCSRApproverController(
+	controllerName string,
+	operatorClient v1helpers.OperatorClient,
+	csrClient certv1client.CertificateSigningRequestInterface,
+	csrInformers certv1informers.CertificateSigningRequestInformer,
+	csrFilter CSRFilter,
+	csrApprover CSRApprover,
+	eventsRecorder events.Recorder,
+) factory.Controller {
+	c := &csrApproverController{
+		csrClient:   csrClient,
+		csrLister:   csrInformers.Lister(),
+		csrApprover: csrApprover,
+	}
+
+	csrFilterConverted := func(csr interface{}) bool {
+		csrObj, ok := csr.(*certapiv1.CertificateSigningRequest)
+		if !ok {
+			return false
+		}
+		return csrFilter.Matches(csrObj)
+	}
+
+	f := factory.New().
+		WithSync(c.sync).
+		WithFilteredEventsInformersQueueKeyFunc(factory.ObjectNameToKey, csrFilterConverted, csrInformers.Informer())
+
+	if operatorClient != nil {
+		f.WithSyncDegradedOnError(operatorClient)
+	}
+
+	return f.ToController(
+		"WebhookAuthenticatorCertApprover_"+controllerName,
+		eventsRecorder.WithComponentSuffix("webhook-authenticator-cert-approver-"+controllerName),
+	)
+}
+
+func (c *csrApproverController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
+	csr, err := c.csrLister.Get(syncCtx.QueueKey())
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}
+
+	if approved, denied := getCertApprovalCondition(&csr.Status); approved || denied {
+		return nil
+	}
+
+	csrCopy := csr.DeepCopy()
+
+	csrPEM, _ := pem.Decode(csr.Spec.Request)
+	if csrPEM == nil {
+		return fmt.Errorf("failed to PEM-parse the CSR block in .spec.request: no CSRs were found")
+	}
+
+	x509CSR, err := x509.ParseCertificateRequest(csrPEM.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to parse the CSR bytes: %w", err)
+	}
+
+	if x509CSR.Subject.CommonName == csr.Spec.Username {
+		// return here so that a self-requested CSR is only denied, never approved below
+		return c.denyCSR(ctx, csrCopy, "IllegitimateRequester", "requester cannot request certificates for themselves", syncCtx.Recorder())
+	}
+
+	csrDecision, denyReason, err := c.csrApprover.Approve(csr, x509CSR)
+	if err != nil {
+		return c.denyCSR(ctx, csrCopy, "CSRApprovingFailed", fmt.Sprintf("there was an error during CSR approval: %v", err), syncCtx.Recorder())
+	}
+
+	switch csrDecision {
+	case CSRDenied:
+		return c.denyCSR(ctx, csrCopy, "CSRDenied", denyReason, syncCtx.Recorder())
+	case CSRApproved:
+		return c.approveCSR(ctx, csrCopy, syncCtx.Recorder())
+	case CSRNoOpinion:
+		fallthrough
+	default:
+		return nil
+	}
+}
+
+func (c *csrApproverController) denyCSR(ctx context.Context, csrCopy *certapiv1.CertificateSigningRequest, reason, message string, eventsRecorder events.Recorder) error {
+	csrCopy.Status.Conditions = append(csrCopy.Status.Conditions,
+		certapiv1.CertificateSigningRequestCondition{
+			Type:    certapiv1.CertificateDenied,
+			Status:  corev1.ConditionTrue,
+			Reason:  reason,
+			Message: message,
+		},
+	)
+
+	eventsRecorder.Eventf("CSRDenial", "The CSR %q has been denied: %s - %s", csrCopy.Name, reason, message)
+	_, err := c.csrClient.UpdateApproval(ctx, csrCopy.Name, csrCopy, v1.UpdateOptions{})
+	return err
+}
+
+func (c *csrApproverController) approveCSR(ctx context.Context, csrCopy *certapiv1.CertificateSigningRequest, eventsRecorder events.Recorder) error {
+	csrCopy.Status.Conditions = append(csrCopy.Status.Conditions,
+		certapiv1.CertificateSigningRequestCondition{
+			Type:    certapiv1.CertificateApproved,
+			Status:  corev1.ConditionTrue,
+			Reason:  "AutoApproved",
+			Message: fmt.Sprintf("Auto-approved CSR %q", csrCopy.Name),
+		})
+
+	eventsRecorder.Eventf("CSRApproval", "The CSR %q has been approved", csrCopy.Name)
+	_, err := c.csrClient.UpdateApproval(ctx, csrCopy.Name, csrCopy, v1.UpdateOptions{})
+	return err
+}
+
+func getCertApprovalCondition(status *certapiv1.CertificateSigningRequestStatus) (approved bool, denied bool) {
+	for _, c := range status.Conditions {
+		if c.Type == certapiv1.CertificateApproved {
+			approved = true
+		}
+		if c.Type == certapiv1.CertificateDenied {
+			denied = true
+		}
+	}
+	return
+}
+
+type ServiceAccountApprover struct {
+	saGroups        sets.String // saGroups is the set of groups for the SA expected to have created the CSR
+	saName          string
+	expectedSubject string
+}
+
+// NewServiceAccountApprover returns a ServiceAccountApprover, which approves CSRs with the given subject that were created by the provided service account.
+func NewServiceAccountApprover(saNamespace, saName, expectedSubject string, additionalGroups ...string) *ServiceAccountApprover {
+	saGroups := append(serviceaccount.MakeGroupNames(saNamespace), "system:authenticated")
+
+	return &ServiceAccountApprover{
+		saName:          serviceaccount.MakeUsername(saNamespace, saName),
+		saGroups:        sets.NewString(append(saGroups, additionalGroups...)...),
+		expectedSubject: expectedSubject,
+	}
+}
+
+func (a *ServiceAccountApprover) Approve(csrObj *certapiv1.CertificateSigningRequest, x509CSR *x509.CertificateRequest) (approvalStatus CSRApprovalDecision, denyReason string, err error) {
+	if csrObj == nil || x509CSR == nil {
+		return CSRDenied, "Error", fmt.Errorf("received a 'nil' CSR")
+	}
+
+	if csrObj.Spec.Username != a.saName {
+		return CSRDenied, fmt.Sprintf("CSR %q was created by an unexpected user: %q", csrObj.Name, csrObj.Spec.Username), nil
+	}
+
+	if csrGroups := sets.NewString(csrObj.Spec.Groups...); !csrGroups.Equal(a.saGroups) {
+		return CSRDenied, fmt.Sprintf("CSR %q was created by a user with unexpected groups: %v", csrObj.Name, csrGroups.List()), nil
+	}
+
+	if expectedSubject := a.expectedSubject; x509CSR.Subject.String() != expectedSubject {
+		return CSRDenied, fmt.Sprintf("expected the CSR's subject to be %q, but it is %q", expectedSubject, x509CSR.Subject.String()), nil
+	}
+
+	return CSRApproved, "", nil
+}
+
+type CSRFilter interface {
+	Matches(csr *certapiv1.CertificateSigningRequest) bool
+}
+
+type AndFilter struct {
+	a, b CSRFilter
+}
+
+func NewAndFilter(a, b CSRFilter) *AndFilter {
+	return &AndFilter{a, b}
+}
+
+func (f *AndFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool {
+	return f.a.Matches(csr) && f.b.Matches(csr)
+}
+
+type OrFilter struct {
+	a, b CSRFilter
+}
+
+func NewOrFilter(a, b CSRFilter) *OrFilter {
+	return &OrFilter{a, b}
+}
+
+func (f *OrFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool {
+	return f.a.Matches(csr) || f.b.Matches(csr)
+}
+
+type LabelFilter struct {
+	labelSelector labels.Selector
+}
+
+func NewLabelFilter(selector labels.Selector) *LabelFilter {
+	return &LabelFilter{selector}
+}
+
+func (f *LabelFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool {
+	return f.labelSelector.Matches(labels.Set(csr.Labels))
+}
+
+type NamesFilter struct {
+	names sets.String
+}
+
+func NewNamesFilter(names ...string) *NamesFilter {
+	return &NamesFilter{sets.NewString(names...)}
+}
+
+func (f *NamesFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool {
+	return f.names.Has(csr.Name)
+}
+
+type RequestCommonNameFilter struct {
+	commonNames sets.String
+}
+
+func NewRequestCommonNameFilter(commonNames ...string) *RequestCommonNameFilter {
+	return &RequestCommonNameFilter{sets.NewString(commonNames...)}
+}
+
+// Matches implements CSRFilter.
+func (f *RequestCommonNameFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool {
+	// the CSR in .spec.request is PEM-encoded, so decode it before parsing
+	block, _ := pem.Decode(csr.Spec.Request)
+	if block == nil {
+		klog.V(4).Infof("failed to PEM-decode the CSR .spec.request of %q", csr.Name)
+		return false
+	}
+
+	x509CSR, err := x509.ParseCertificateRequest(block.Bytes)
+	if err != nil {
+		klog.V(4).Infof("failed to parse the CSR .spec.request of %q: %v", csr.Name, err)
+		return false
+	}
+
+	return f.commonNames.Has(x509CSR.Subject.CommonName)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/simple_clientcert_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/csr/simple_clientcert_controller.go
new file mode 100644
index 0000000000..af78eba126
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/simple_clientcert_controller.go
@@ -0,0 +1,50 @@
+package csr
+
+import (
+	"crypto/x509/pkix"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/events" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewSimpleClientCertificateController creates a controller that keeps a secret up to date with a client-cert +// valid against the kube-apiserver. This version only works in a single cluster. The base library allows +// the secret in one cluster and the CSR in another. +func NewSimpleClientCertificateController( + secretNamespace, secretName string, + commonName string, groups []string, + kubeInformers informers.SharedInformerFactory, + kubeClient kubernetes.Interface, + recorder events.Recorder, +) factory.Controller { + certOptions := ClientCertOption{ + SecretNamespace: secretNamespace, + SecretName: secretName, + } + csrOptions := CSROption{ + ObjectMeta: metav1.ObjectMeta{}, + Subject: &pkix.Name{ + Organization: groups, + CommonName: commonName, + }, + SignerName: "kubernetes.io/kube-apiserver-client", + EventFilterFunc: nil, + } + controllerName := fmt.Sprintf("client-cert-%s[%s]", secretName, secretNamespace) + + return NewClientCertificateController( + certOptions, + csrOptions, + kubeInformers.Certificates().V1().CertificateSigningRequests(), + kubeClient.CertificatesV1().CertificateSigningRequests(), + kubeInformers.Core().V1().Secrets(), + kubeClient.CoreV1(), + recorder, + controllerName, + ) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS new file mode 100644 index 0000000000..4f189b7087 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - mfojtik + - deads2k + - sttts +approvers: + - mfojtik + - deads2k + - sttts diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go new file mode 100644 index 0000000000..68b2b005c1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go @@ -0,0 +1,219 @@ +package events + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Recorder is a simple event recording interface. +type Recorder interface { + Event(reason, message string) + Eventf(reason, messageFmt string, args ...interface{}) + Warning(reason, message string) + Warningf(reason, messageFmt string, args ...interface{}) + + // ForComponent allows to fiddle the component name before sending the event to sink. + // Making more unique components will prevent the spam filter in upstream event sink from dropping + // events. + ForComponent(componentName string) Recorder + + // WithComponentSuffix is similar to ForComponent except it just suffix the current component name instead of overriding. + WithComponentSuffix(componentNameSuffix string) Recorder + + // ComponentName returns the current source component name for the event. + // This allows to suffix the original component name with 'sub-component'. + ComponentName() string + + Shutdown() +} + +// podNameEnv is a name of environment variable inside container that specifies the name of the current replica set. +// This replica set name is then used as a source/involved object for operator events. 
+const podNameEnv = "POD_NAME"
+
+// podNameEnvFunc allows overriding the way we get the environment variable value (for unit tests).
+var podNameEnvFunc = func() string {
+	return os.Getenv(podNameEnv)
+}
+
+// GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs.
+// The pod name must be provided via the POD_NAME environment variable.
+// Even if this method returns an error, it always returns a valid reference to the namespace. This allows the callers to control
+// the logging and decide whether to fail or accept the namespace fallback.
+func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) {
+	if reference == nil {
+		// Try to get the pod name via the POD_NAME environment variable
+		reference := &corev1.ObjectReference{Kind: "Pod", Name: podNameEnvFunc(), Namespace: targetNamespace}
+		if len(reference.Name) != 0 {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, reference)
+		}
+		// If that fails, try to guess the pod by listing all pods in the namespace and using the first pod with a controller reference
+		reference, err := guessControllerReferenceForNamespace(client.CoreV1().Pods(targetNamespace))
+		if err != nil {
+			// If this fails, do not give up with an error; instead use the namespace as the controller reference for the pod
+			// NOTE: This is a last resort; if we see this often it might indicate something is wrong in the cluster.
+			//       In some cases this might help with flakes.
+			return getControllerReferenceForNamespace(targetNamespace), err
+		}
+		return GetControllerReferenceForCurrentPod(client, targetNamespace, reference)
+	}
+
+	switch reference.Kind {
+	case "Pod":
+		pod, err := client.CoreV1().Pods(reference.Namespace).Get(context.TODO(), reference.Name, metav1.GetOptions{})
+		if err != nil {
+			return getControllerReferenceForNamespace(reference.Namespace), err
+		}
+		if podController := metav1.GetControllerOf(pod); podController != nil {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(podController, targetNamespace))
+		}
+		// This is a bare pod without any ownerReference
+		return makeObjectReference(&metav1.OwnerReference{Kind: "Pod", Name: pod.Name, UID: pod.UID, APIVersion: "v1"}, pod.Namespace), nil
+	case "ReplicaSet":
+		rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(context.TODO(), reference.Name, metav1.GetOptions{})
+		if err != nil {
+			return getControllerReferenceForNamespace(reference.Namespace), err
+		}
+		if rsController := metav1.GetControllerOf(rs); rsController != nil {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(rsController, targetNamespace))
+		}
+		// This is a replicaSet without any ownerReference
+		return reference, nil
+	default:
+		return reference, nil
+	}
+}
+
+// getControllerReferenceForNamespace returns an object reference to the given namespace.
+func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference {
+	return &corev1.ObjectReference{
+		Kind:       "Namespace",
+		Namespace:  targetNamespace,
+		Name:       targetNamespace,
+		APIVersion: "v1",
+	}
+}
+
+// makeObjectReference makes an object reference from an ownerReference and a target namespace
+func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference {
+	return &corev1.ObjectReference{
+		Kind:       owner.Kind,
+		Namespace:  targetNamespace,
+		Name:       owner.Name,
+		UID:        owner.UID,
+		APIVersion: owner.APIVersion,
+	}
+}
+
+// guessControllerReferenceForNamespace tries to guess what resource to reference.
+func guessControllerReferenceForNamespace(client corev1client.PodInterface) (*corev1.ObjectReference, error) {
+	pods, err := client.List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	if len(pods.Items) == 0 {
+		return nil, fmt.Errorf("unable to set up event recorder as the %q env variable is not set and there are no pods", podNameEnv)
+	}
+
+	for _, pod := range pods.Items {
+		ownerRef := metav1.GetControllerOf(&pod)
+		if ownerRef == nil {
+			continue
+		}
+		return &corev1.ObjectReference{
+			Kind:       ownerRef.Kind,
+			Namespace:  pod.Namespace,
+			Name:       ownerRef.Name,
+			UID:        ownerRef.UID,
+			APIVersion: ownerRef.APIVersion,
+		}, nil
+	}
+	return nil, errors.New("can't guess controller ref")
+}
+
+// NewRecorder returns a new event recorder.
+func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+	return &recorder{
+		eventClient:       client,
+		involvedObjectRef: involvedObjectRef,
+		sourceComponent:   sourceComponentName,
+	}
+}
+
+// recorder is an implementation of the Recorder interface.
+type recorder struct {
+	eventClient       corev1client.EventInterface
+	involvedObjectRef *corev1.ObjectReference
+	sourceComponent   string
+}
+
+func (r *recorder) ComponentName() string {
+	return r.sourceComponent
+}
+
+func (r *recorder) Shutdown() {}
+
+func (r *recorder) ForComponent(componentName string) Recorder {
+	newRecorderForComponent := *r
+	newRecorderForComponent.sourceComponent = componentName
+	return &newRecorderForComponent
+}
+
+func (r *recorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+// Eventf emits a normal type event and allows formatting of the message.
+func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Warningf emits a warning type event and allows formatting of the message.
+func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Event emits a normal type event.
+func (r *recorder) Event(reason, message string) {
+	event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message)
+	if _, err := r.eventClient.Create(context.TODO(), event, metav1.CreateOptions{}); err != nil {
+		klog.Warningf("Error creating event %+v: %v", event, err)
+	}
+}
+
+// Warning emits a warning type event.
+func (r *recorder) Warning(reason, message string) {
+	event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message)
+	if _, err := r.eventClient.Create(context.TODO(), event, metav1.CreateOptions{}); err != nil {
+		klog.Warningf("Error creating event %+v: %v", event, err)
+	}
+}
+
+func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event {
+	currentTime := metav1.Time{Time: time.Now()}
+	event := &corev1.Event{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()),
+			Namespace: involvedObjRef.Namespace,
+		},
+		InvolvedObject: *involvedObjRef,
+		Reason:         reason,
+		Message:        message,
+		Type:           eventType,
+		Count:          1,
+		FirstTimestamp: currentTime,
+		LastTimestamp:  currentTime,
+	}
+	event.Source.Component = sourceComponent
+	return event
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go
new file mode 100644
index 0000000000..7c6856c350
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go
@@ -0,0 +1,79 @@
+package events
+
+import (
+	"fmt"
+	"sync"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/klog/v2"
+)
+
+type inMemoryEventRecorder struct {
+	events []*corev1.Event
+	source string
+	sync.Mutex
+}
+
+// inMemoryDummyObjectReference is used for fake events.
+var inMemoryDummyObjectReference = corev1.ObjectReference{
+	Kind:       "Pod",
+	Namespace:  "dummy",
+	Name:       "dummy",
+	APIVersion: "v1",
+}
+
+type InMemoryRecorder interface {
+	Events() []*corev1.Event
+	Recorder
+}
+
+// NewInMemoryRecorder provides an event recorder that stores all recorded events in memory and allows
+// replaying them via the Events() method.
+// This recorder should only be used in unit tests.
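+//
+// A minimal sketch of use in a test (the reason/message values are illustrative):
+//
+//	recorder := events.NewInMemoryRecorder("test")
+//	recorder.Eventf("SomethingHappened", "observed %d changes", 2)
+//	if got := len(recorder.Events()); got != 1 {
+//		t.Fatalf("expected 1 event, got %d", got)
+//	}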
+func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder {
+	return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent}
+}
+
+func (r *inMemoryEventRecorder) ComponentName() string {
+	return r.source
+}
+
+func (r *inMemoryEventRecorder) Shutdown() {}
+
+func (r *inMemoryEventRecorder) ForComponent(component string) Recorder {
+	r.Lock()
+	defer r.Unlock()
+	r.source = component
+	return r
+}
+
+func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+// Events returns the list of recorded events
+func (r *inMemoryEventRecorder) Events() []*corev1.Event {
+	r.Lock()
+	defer r.Unlock()
+	return r.events
+}
+
+func (r *inMemoryEventRecorder) Event(reason, message string) {
+	r.Lock()
+	defer r.Unlock()
+	event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message)
+	r.events = append(r.events, event)
+}
+
+func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *inMemoryEventRecorder) Warning(reason, message string) {
+	r.Lock()
+	defer r.Unlock()
+	event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message)
+	klog.Info(event.String())
+	r.events = append(r.events, event)
+}
+
+func (r *inMemoryEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go
new file mode 100644
index 0000000000..24796c807c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go
@@ -0,0 +1,51 @@
+package events
+
+import (
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/klog/v2"
+)
+
+type LoggingEventRecorder struct {
+	component string
+}
+
+// NewLoggingEventRecorder provides an event recorder that logs all recorded events via klog.
+func NewLoggingEventRecorder(component string) Recorder {
+	return &LoggingEventRecorder{component: component}
+}
+
+func (r *LoggingEventRecorder) ComponentName() string {
+	return r.component
+}
+
+func (r *LoggingEventRecorder) ForComponent(component string) Recorder {
+	newRecorder := *r
+	newRecorder.component = component
+	return &newRecorder
+}
+
+func (r *LoggingEventRecorder) Shutdown() {}
+
+func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *LoggingEventRecorder) Event(reason, message string) {
+	event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message)
+	klog.Info(event.String())
+}
+
+func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *LoggingEventRecorder) Warning(reason, message string) {
+	event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message)
+	klog.Warning(event.String())
+}
+
+func (r *LoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go
new file mode 100644
index 0000000000..39f9eb5962
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go
@@ -0,0 +1,166 @@
+package events
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+	"k8s.io/klog/v2"
+)
+
+// NewKubeRecorderWithOptions returns a new event recorder with tweaked correlator options.
+func NewKubeRecorderWithOptions(client corev1client.EventInterface, options record.CorrelatorOptions, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+	return (&upstreamRecorder{
+		client:            client,
+		component:         sourceComponentName,
+		involvedObjectRef: involvedObjectRef,
+		options:           options,
+		fallbackRecorder:  NewRecorder(client, sourceComponentName, involvedObjectRef),
+	}).ForComponent(sourceComponentName)
+}
+
+// NewKubeRecorder returns a new event recorder with default correlator options.
+func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+	return NewKubeRecorderWithOptions(client, record.CorrelatorOptions{}, sourceComponentName, involvedObjectRef)
+}
+
+// upstreamRecorder is an implementation of the Recorder interface.
+type upstreamRecorder struct {
+	client            corev1client.EventInterface
+	component         string
+	broadcaster       record.EventBroadcaster
+	eventRecorder     record.EventRecorder
+	involvedObjectRef *corev1.ObjectReference
+	options           record.CorrelatorOptions
+
+	// shuttingDown indicates that the broadcaster for this recorder is being shut down
+	shuttingDown  bool
+	shutdownMutex sync.RWMutex
+
+	// fallbackRecorder is used when the kube recorder is shutting down;
+	// in that case we create the events directly.
+	fallbackRecorder Recorder
+}
+
+// RecommendedClusterSingletonCorrelatorOptions provides recommended event correlator options for components that produce
+// many events (like operators).
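+//
+// Typical wiring (the client, namespace and component names here are illustrative assumptions):
+//
+//	recorder := events.NewKubeRecorderWithOptions(
+//		kubeClient.CoreV1().Events("openshift-example"),
+//		events.RecommendedClusterSingletonCorrelatorOptions(),
+//		"example-operator",
+//		involvedObjectRef,
+//	)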
+func RecommendedClusterSingletonCorrelatorOptions() record.CorrelatorOptions {
+	return record.CorrelatorOptions{
+		BurstSize: 60,      // default: 25 (change allows a single source to send 60 events about an object per minute)
+		QPS:       1. / 1., // default: 1/300 (change raises the refill rate to 1 new event every 1s)
+		KeyFunc: func(event *corev1.Event) (aggregateKey string, localKey string) {
+			return strings.Join([]string{
+				event.Source.Component,
+				event.Source.Host,
+				event.InvolvedObject.Kind,
+				event.InvolvedObject.Namespace,
+				event.InvolvedObject.Name,
+				string(event.InvolvedObject.UID),
+				event.InvolvedObject.APIVersion,
+				event.Type,
+				event.Reason,
+				// By default, the KeyFunc does not use the message for aggregation; including it here
+				// prevents events with different messages but the same reason from being collapsed as "similar events" and lost.
+				event.Message,
+			}, ""), event.Message
+		},
+	}
+}
+
+var eventsCounterMetric = metrics.NewCounterVec(&metrics.CounterOpts{
+	Subsystem:      "event_recorder",
+	Name:           "total_events_count",
+	Help:           "Total count of events processed by this event recorder per involved object",
+	StabilityLevel: metrics.ALPHA,
+}, []string{"severity"})
+
+func init() {
+	// init runs once per process, so the metric is registered exactly once.
+	legacyregistry.MustRegister(eventsCounterMetric)
+}
+
+func (r *upstreamRecorder) ForComponent(componentName string) Recorder {
+	newRecorderForComponent := upstreamRecorder{
+		client:            r.client,
+		fallbackRecorder:  r.fallbackRecorder.WithComponentSuffix(componentName),
+		options:           r.options,
+		involvedObjectRef: r.involvedObjectRef,
+		shuttingDown:      r.shuttingDown,
+	}
+
+	// tweak the event correlator so we don't lose important events.
+	broadcaster := record.NewBroadcasterWithCorrelatorOptions(r.options)
+	broadcaster.StartLogging(klog.Infof)
+	broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: newRecorderForComponent.client})
+
+	newRecorderForComponent.eventRecorder = broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: componentName})
+	newRecorderForComponent.broadcaster = broadcaster
+	newRecorderForComponent.component = componentName
+
+	return &newRecorderForComponent
+}
+
+func (r *upstreamRecorder) Shutdown() {
+	r.shutdownMutex.Lock()
+	r.shuttingDown = true
+	r.shutdownMutex.Unlock()
+	// Wait for the broadcaster to flush events (this is blocking).
+	// TODO: There is still a race condition in upstream that might cause a panic() on events recorded after the shutdown
+	//       is called, as the event recording is non-blocking (goroutine based).
+	r.broadcaster.Shutdown()
+}
+
+func (r *upstreamRecorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *upstreamRecorder) ComponentName() string {
+	return r.component
+}
+
+// Eventf emits a normal type event and allows formatting of the message.
+func (r *upstreamRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Warningf emits a warning type event and allows formatting of the message.
+func (r *upstreamRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *upstreamRecorder) incrementEventsCounter(severity string) {
+	if r.involvedObjectRef == nil {
+		return
+	}
+	eventsCounterMetric.WithLabelValues(severity).Inc()
+}
+
+// Event emits a normal type event.
+func (r *upstreamRecorder) Event(reason, message string) {
+	r.shutdownMutex.RLock()
+	defer r.shutdownMutex.RUnlock()
+	defer r.incrementEventsCounter(corev1.EventTypeNormal)
+	if r.shuttingDown {
+		r.fallbackRecorder.Event(reason, message)
+		return
+	}
+	r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeNormal, reason, message)
+}
+
+// Warning emits a warning type event.
+func (r *upstreamRecorder) Warning(reason, message string) {
+	r.shutdownMutex.RLock()
+	defer r.shutdownMutex.RUnlock()
+	defer r.incrementEventsCounter(corev1.EventTypeWarning)
+	if r.shuttingDown {
+		r.fallbackRecorder.Warning(reason, message)
+		return
+	}
+	r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeWarning, reason, message)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go
new file mode 100644
index 0000000000..294770f3e0
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go
@@ -0,0 +1,77 @@
+package management
+
+import (
+	v1 "github.com/openshift/api/operator/v1"
+)
+
+var (
+	allowOperatorUnmanagedState = true
+	allowOperatorRemovedState   = true
+)
+
+// SetOperatorAlwaysManaged is a one-time choice for an operator that wants to opt out of supporting the "unmanaged" state.
+// This is the case for control-plane operators or operators that are required to always run, as otherwise the cluster will
+// get into an unstable state or critical components will stop working.
+func SetOperatorAlwaysManaged() {
+	allowOperatorUnmanagedState = false
+}
+
+// SetOperatorUnmanageable is a one-time choice for an operator that wants to support the "unmanaged" state.
+// This is the default setting, provided here mostly for unit tests.
+func SetOperatorUnmanageable() {
+	allowOperatorUnmanagedState = true
+}
+
+// SetOperatorNotRemovable is a one-time choice the operator author can make to indicate the operator does not support
+// removal of its operand. This makes sense for operators like kube-apiserver, where removing the operand will lead to a
+// bricked, non-automatically-recoverable state.
+func SetOperatorNotRemovable() {
+	allowOperatorRemovedState = false
+}
+
+// SetOperatorRemovable is a one-time choice the operator author can make to indicate the operator supports
+// removal of its operand.
+// This is the default setting, provided here mostly for unit tests.
+func SetOperatorRemovable() {
+	allowOperatorRemovedState = true
+}
+
+// IsOperatorAlwaysManaged means the operator can't be set to the unmanaged state.
+func IsOperatorAlwaysManaged() bool {
+	return !allowOperatorUnmanagedState
+}
+
+// IsOperatorNotRemovable means the operator can't be set to the removed state.
+func IsOperatorNotRemovable() bool {
+	return !allowOperatorRemovedState
+}
+
+// IsOperatorRemovable means the operator can be set to the removed state.
+func IsOperatorRemovable() bool {
+	return allowOperatorRemovedState
+}
+
+func IsOperatorUnknownState(state v1.ManagementState) bool {
+	switch state {
+	case v1.Managed, v1.Removed, v1.Unmanaged:
+		return false
+	default:
+		return true
+	}
+}
+
+// IsOperatorManaged indicates whether the operator management state allows the control loop to proceed and manage the operand.
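+//
+// A typical guard at the top of an operator sync loop might look like this
+// (the operatorSpec variable is an illustrative assumption):
+//
+//	if !management.IsOperatorManaged(operatorSpec.ManagementState) {
+//		return nil // nothing to do while unmanaged or removed
+//	}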
+func IsOperatorManaged(state v1.ManagementState) bool {
+	if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() {
+		return true
+	}
+	switch state {
+	case v1.Managed:
+		return true
+	case v1.Removed:
+		return false
+	case v1.Unmanaged:
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go
new file mode 100644
index 0000000000..e1a165e63f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go
@@ -0,0 +1,61 @@
+package v1helpers
+
+import (
+	"fmt"
+	"sort"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+// FlagsFromUnstructured processes the unstructured arguments usually retrieved from an operator's configuration file under a specific key.
+// There are only two supported/valid types for arguments: []string and string.
+// Passing a different type yields an error.
+//
+// Use the ToFlagSlice function to get a slice of string flags.
+func FlagsFromUnstructured(unstructuredArgs map[string]interface{}) (map[string][]string, error) {
+	return flagsFromUnstructured(unstructuredArgs)
+}
+
+// ToFlagSlice transforms the provided arguments to a slice of string flags.
+// A flag name is taken directly from the key and the value is simply attached.
+// A flag is repeated iff it has more than one value.
+func ToFlagSlice(args map[string][]string) []string {
+	var keys []string
+	for key := range args {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	var flags []string
+	for _, key := range keys {
+		for _, token := range args[key] {
+			flags = append(flags, fmt.Sprintf("--%s=%s", key, token))
+		}
+	}
+	return flags
+}
+
+// flagsFromUnstructured processes the unstructured arguments (interface{}) to a map of strings.
+// There are only two supported/valid types for arguments: []string and string.
+// Passing a different type yields an error.
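+//
+// For example (an illustrative input), given {"v": "2", "feature-gates": ["A=true", "B=false"]},
+// ToFlagSlice(FlagsFromUnstructured(args)) produces:
+//
+//	--feature-gates=A=true --feature-gates=B=false --v=2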
+func flagsFromUnstructured(unstructuredArgs map[string]interface{}) (map[string][]string, error) { + ret := map[string][]string{} + for argName, argRawValue := range unstructuredArgs { + var argsSlice []string + var found bool + var err error + + argsSlice, found, err = unstructured.NestedStringSlice(unstructuredArgs, argName) + if !found || err != nil { + str, found, err := unstructured.NestedString(unstructuredArgs, argName) + if !found || err != nil { + return nil, fmt.Errorf("unable to process an argument, incorrect value %v under %v key, expected []string or string", argRawValue, argName) + } + argsSlice = append(argsSlice, str) + } + + ret[argName] = argsSlice + } + + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go new file mode 100644 index 0000000000..bdfe17d92a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go @@ -0,0 +1,127 @@ +package v1helpers + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +var ( + emptyGetOptions = metav1.GetOptions{} + emptyListOptions = metav1.ListOptions{} +) + +type combinedConfigMapGetter struct { + client corev1client.ConfigMapsGetter + listers KubeInformersForNamespaces +} + +func CachedConfigMapGetter(client corev1client.ConfigMapsGetter, listers KubeInformersForNamespaces) corev1client.ConfigMapsGetter { + return &combinedConfigMapGetter{ + client: client, + listers: listers, + } +} + +type combinedConfigMapInterface struct { + corev1client.ConfigMapInterface + lister corev1listers.ConfigMapNamespaceLister + namespace string +} + +func (g combinedConfigMapGetter) ConfigMaps(namespace string) corev1client.ConfigMapInterface { + return combinedConfigMapInterface{ + ConfigMapInterface: g.client.ConfigMaps(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().ConfigMaps().Lister().ConfigMaps(namespace), + namespace: namespace, + } +} + +func (g combinedConfigMapInterface) Get(_ context.Context, name string, options metav1.GetOptions) (*corev1.ConfigMap, error) { + if !equality.Semantic.DeepEqual(options, emptyGetOptions) { + return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options) + } + + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} +func (g combinedConfigMapInterface) List(_ context.Context, options metav1.ListOptions) (*corev1.ConfigMapList, error) { + if !equality.Semantic.DeepEqual(options, emptyListOptions) { + return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options) + } + + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.ConfigMapList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} + +type combinedSecretGetter struct { + client corev1client.SecretsGetter + listers KubeInformersForNamespaces +} + +func CachedSecretGetter(client corev1client.SecretsGetter, listers KubeInformersForNamespaces) corev1client.SecretsGetter { + return &combinedSecretGetter{ + client: client, + listers: listers, + } +} + +type combinedSecretInterface struct { + corev1client.SecretInterface + 
lister corev1listers.SecretNamespaceLister + namespace string +} + +func (g combinedSecretGetter) Secrets(namespace string) corev1client.SecretInterface { + return combinedSecretInterface{ + SecretInterface: g.client.Secrets(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace), + namespace: namespace, + } +} + +func (g combinedSecretInterface) Get(_ context.Context, name string, options metav1.GetOptions) (*corev1.Secret, error) { + if !equality.Semantic.DeepEqual(options, emptyGetOptions) { + return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options) + } + + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} + +func (g combinedSecretInterface) List(_ context.Context, options metav1.ListOptions) (*corev1.SecretList, error) { + if !equality.Semantic.DeepEqual(options, emptyListOptions) { + return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options) + } + + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.SecretList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go new file mode 100644 index 0000000000..8933328978 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go @@ -0,0 +1,7 @@ +package v1helpers + +import "k8s.io/client-go/informers" + +func NewFakeKubeInformersForNamespaces(informers map[string]informers.SharedInformerFactory) KubeInformersForNamespaces { + return kubeInformersForNamespaces(informers) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go new file mode 100644 index 0000000000..1a332f7dc7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go @@ -0,0 +1,384 @@ +package v1helpers + +import ( + "errors" + "fmt" + "os" + "sort" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/retry" + + "github.com/ghodss/yaml" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// SetOperandVersion sets the new version and returns the previous value. 
+func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string { + if versions == nil { + versions = &[]configv1.OperandVersion{} + } + existingVersion := FindOperandVersion(*versions, operandVersion.Name) + if existingVersion == nil { + *versions = append(*versions, operandVersion) + return "" + } + + previous := existingVersion.Version + existingVersion.Version = operandVersion.Version + return previous +} + +func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion { + if versions == nil { + return nil + } + for i := range versions { + if versions[i].Name == name { + return &versions[i] + } + } + return nil +} + +func SetOperatorCondition(conditions *[]operatorv1.OperatorCondition, newCondition operatorv1.OperatorCondition) { + if conditions == nil { + conditions = &[]operatorv1.OperatorCondition{} + } + existingCondition := FindOperatorCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +func RemoveOperatorCondition(conditions *[]operatorv1.OperatorCondition, conditionType string) { + if conditions == nil { + conditions = &[]operatorv1.OperatorCondition{} + } + newConditions := []operatorv1.OperatorCondition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +func FindOperatorCondition(conditions []operatorv1.OperatorCondition, conditionType string) *operatorv1.OperatorCondition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +func IsOperatorConditionTrue(conditions []operatorv1.OperatorCondition, conditionType string) bool { + return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionTrue) +} + +func IsOperatorConditionFalse(conditions []operatorv1.OperatorCondition, conditionType string) bool { + return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionFalse) +} + +func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorCondition, conditionType string, status operatorv1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} + +// UpdateOperatorSpecFunc is a func that mutates an operator spec. +type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error + +// UpdateSpec applies the update funcs to the oldStatus and tries to update via the client. 
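Illustrative sketch (not part of the vendored patch): typical use of the condition helpers above. The condition type strings are hypothetical; the point shown is that SetOperatorCondition refreshes Reason/Message but only bumps LastTransitionTime when Status actually changes.

    package main

    import (
        "fmt"

        operatorv1 "github.com/openshift/api/operator/v1"
        "github.com/openshift/library-go/pkg/operator/v1helpers"
    )

    func main() {
        status := &operatorv1.OperatorStatus{}
        v1helpers.SetOperatorCondition(&status.Conditions, operatorv1.OperatorCondition{
            Type:   "Degraded",
            Status: operatorv1.ConditionFalse,
            Reason: "AsExpected",
        })
        // Same Status again: Reason is refreshed, LastTransitionTime is kept.
        v1helpers.SetOperatorCondition(&status.Conditions, operatorv1.OperatorCondition{
            Type:   "Degraded",
            Status: operatorv1.ConditionFalse,
            Reason: "StillAsExpected",
        })
        fmt.Println(v1helpers.IsOperatorConditionFalse(status.Conditions, "Degraded")) // true
    }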
+func UpdateSpec(client OperatorClient, updateFuncs ...UpdateOperatorSpecFunc) (*operatorv1.OperatorSpec, bool, error) {
+	updated := false
+	var operatorSpec *operatorv1.OperatorSpec
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		oldSpec, _, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newSpec := oldSpec.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newSpec); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldSpec, newSpec) {
+			return nil
+		}
+
+		operatorSpec, _, err = client.UpdateOperatorSpec(resourceVersion, newSpec)
+		updated = err == nil
+		return err
+	})
+
+	return operatorSpec, updated, err
+}
+
+// UpdateObservedConfigFn returns a func to update the observed config.
+func UpdateObservedConfigFn(config map[string]interface{}) UpdateOperatorSpecFunc {
+	return func(oldSpec *operatorv1.OperatorSpec) error {
+		oldSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: config}}
+		return nil
+	}
+}
+
+// UpdateStatusFunc is a func that mutates an operator status.
+type UpdateStatusFunc func(status *operatorv1.OperatorStatus) error
+
+// UpdateStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStatus(client OperatorClient, updateFuncs ...UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.OperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+			updatedOperatorStatus = newStatus
+			return nil
+		}
+
+		updatedOperatorStatus, err = client.UpdateOperatorStatus(resourceVersion, newStatus)
+		updated = err == nil
+		return err
+	})
+
+	return updatedOperatorStatus, updated, err
+}
+
+// UpdateConditionFn returns a func to update a condition.
+func UpdateConditionFn(cond operatorv1.OperatorCondition) UpdateStatusFunc {
+	return func(oldStatus *operatorv1.OperatorStatus) error {
+		SetOperatorCondition(&oldStatus.Conditions, cond)
+		return nil
+	}
+}
+
+// UpdateStaticPodStatusFunc is a func that mutates a static pod operator status.
+type UpdateStaticPodStatusFunc func(status *operatorv1.StaticPodOperatorStatus) error
+
+// UpdateStaticPodStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStaticPodStatus(client StaticPodOperatorClient, updateFuncs ...UpdateStaticPodStatusFunc) (*operatorv1.StaticPodOperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.StaticPodOperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetStaticPodOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+ updatedOperatorStatus = newStatus + return nil + } + + updatedOperatorStatus, err = client.UpdateStaticPodOperatorStatus(resourceVersion, newStatus) + updated = err == nil + return err + }) + + return updatedOperatorStatus, updated, err +} + +// UpdateStaticPodConditionFn returns a func to update a condition. +func UpdateStaticPodConditionFn(cond operatorv1.OperatorCondition) UpdateStaticPodStatusFunc { + return func(oldStatus *operatorv1.StaticPodOperatorStatus) error { + SetOperatorCondition(&oldStatus.Conditions, cond) + return nil + } +} + +// EnsureFinalizer adds a new finalizer to the operator CR, if it does not exists. No-op otherwise. +// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0]) +// It re-tries on conflicts. +func EnsureFinalizer(client OperatorClientWithFinalizers, controllerName string) error { + finalizer := getFinalizerName(controllerName) + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + return client.EnsureFinalizer(finalizer) + }) + return err +} + +// RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise. +// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0]) +// It re-tries on conflicts. +func RemoveFinalizer(client OperatorClientWithFinalizers, controllerName string) error { + finalizer := getFinalizerName(controllerName) + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + return client.RemoveFinalizer(finalizer) + }) + return err +} + +// getFinalizerName computes a nice finalizer name from controllerName and the operator name ($OPERATOR_NAME or os.Args[0]). +func getFinalizerName(controllerName string) string { + return fmt.Sprintf("%s.operator.openshift.io/%s", getOperatorName(), controllerName) +} + +func getOperatorName() string { + if name := os.Getenv("OPERATOR_NAME"); name != "" { + return name + } + return os.Args[0] +} + +type aggregate []error + +var _ utilerrors.Aggregate = aggregate{} + +// NewMultiLineAggregate returns an aggregate error with multi-line output +func NewMultiLineAggregate(errList []error) error { + var errs []error + for _, e := range errList { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// Error is part of the error interface. +func (agg aggregate) Error() string { + msgs := make([]string, len(agg)) + for i := range agg { + msgs[i] = agg[i].Error() + } + return strings.Join(msgs, "\n") +} + +// Errors is part of the Aggregate interface. 
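Illustrative sketch (not part of the vendored patch): UpdateStatus retries on conflicts and reports whether a write actually happened. Here it is paired with the fake client defined later in test_helpers.go; the condition values are hypothetical.

    package main

    import (
        "fmt"

        operatorv1 "github.com/openshift/api/operator/v1"
        "github.com/openshift/library-go/pkg/operator/v1helpers"
    )

    func main() {
        client := v1helpers.NewFakeOperatorClient(
            &operatorv1.OperatorSpec{},
            &operatorv1.OperatorStatus{},
            nil, // no injected status-update error
        )
        _, updated, err := v1helpers.UpdateStatus(client,
            v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
                Type:   "Available",
                Status: operatorv1.ConditionTrue,
            }))
        fmt.Println(updated, err) // true <nil>: the status changed, so a write was issued
    }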
+func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Is is part of the Aggregate interface +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + if match := err.visit(f); match { + return match + } + case utilerrors.Aggregate: + for _, nestedErr := range err.Errors() { + if match := f(nestedErr); match { + return match + } + } + default: + if match := f(err); match { + return match + } + } + } + + return false +} + +// MapToEnvVars converts a string-string map to a slice of corev1.EnvVar-s +func MapToEnvVars(mapEnvVars map[string]string) []corev1.EnvVar { + if mapEnvVars == nil { + return nil + } + + envVars := make([]corev1.EnvVar, len(mapEnvVars)) + i := 0 + for k, v := range mapEnvVars { + envVars[i] = corev1.EnvVar{Name: k, Value: v} + i++ + } + + // need to sort the slice so that kube-controller-manager-pod configmap does not change all the time + sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name }) + return envVars +} + +// InjectObservedProxyIntoContainers injects proxy environment variables in containers specified in containerNames. +func InjectObservedProxyIntoContainers(podSpec *corev1.PodSpec, containerNames []string, observedConfig []byte, fields ...string) error { + var config map[string]interface{} + if err := yaml.Unmarshal(observedConfig, &config); err != nil { + return fmt.Errorf("failed to unmarshal the observedConfig: %w", err) + } + + proxyConfig, found, err := unstructured.NestedStringMap(config, fields...) + if err != nil { + return fmt.Errorf("couldn't get the proxy config from observedConfig: %w", err) + } + + proxyEnvVars := MapToEnvVars(proxyConfig) + if !found || len(proxyEnvVars) < 1 { + // There's no observed proxy config, we should tolerate that + return nil + } + + for _, containerName := range containerNames { + for i := range podSpec.InitContainers { + if podSpec.InitContainers[i].Name == containerName { + podSpec.InitContainers[i].Env = append(podSpec.InitContainers[i].Env, proxyEnvVars...) + } + } + for i := range podSpec.Containers { + if podSpec.Containers[i].Name == containerName { + podSpec.Containers[i].Env = append(podSpec.Containers[i].Env, proxyEnvVars...) 
+ } + } + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go new file mode 100644 index 0000000000..ba3769252d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go @@ -0,0 +1,135 @@ +package v1helpers + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// KubeInformersForNamespaces is a simple way to combine several shared informers into a single struct with unified listing power +type KubeInformersForNamespaces interface { + Start(stopCh <-chan struct{}) + InformersFor(namespace string) informers.SharedInformerFactory + Namespaces() sets.String + + ConfigMapLister() corev1listers.ConfigMapLister + SecretLister() corev1listers.SecretLister + + // Used in by workloads controller and controllers that report deployment pods status + PodLister() corev1listers.PodLister +} + +var _ KubeInformersForNamespaces = kubeInformersForNamespaces{} + +func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces { + ret := kubeInformersForNamespaces{} + for _, namespace := range namespaces { + if len(namespace) == 0 { + ret[""] = informers.NewSharedInformerFactory(kubeClient, 10*time.Minute) + continue + } + ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(namespace)) + } + + return ret +} + +type kubeInformersForNamespaces map[string]informers.SharedInformerFactory + +func (i kubeInformersForNamespaces) Start(stopCh <-chan struct{}) { + for _, informer := range i { + informer.Start(stopCh) + } +} + +func (i kubeInformersForNamespaces) Namespaces() sets.String { + return sets.StringKeySet(i) +} +func (i kubeInformersForNamespaces) InformersFor(namespace string) informers.SharedInformerFactory { + return i[namespace] +} + +func (i kubeInformersForNamespaces) HasInformersFor(namespace string) bool { + return i.InformersFor(namespace) != nil +} + +type configMapLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) ConfigMapLister() corev1listers.ConfigMapLister { + return configMapLister(i) +} + +func (l configMapLister) List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().ConfigMaps().Lister().List(selector) +} + +func (l configMapLister) ConfigMaps(namespace string) corev1listers.ConfigMapNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().ConfigMaps().Lister().ConfigMaps(namespace) +} + +type secretLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) SecretLister() corev1listers.SecretLister { + return secretLister(i) +} + +func (l secretLister) List(selector labels.Selector) (ret []*corev1.Secret, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().Secrets().Lister().List(selector) +} + +func (l secretLister) Secrets(namespace string) 
corev1listers.SecretNamespaceLister {
+	informer, ok := l[namespace]
+	if !ok {
+		// coding error
+		panic(fmt.Sprintf("namespace %q is missing", namespace))
+	}
+
+	return informer.Core().V1().Secrets().Lister().Secrets(namespace)
+}
+
+type podLister kubeInformersForNamespaces
+
+func (i kubeInformersForNamespaces) PodLister() corev1listers.PodLister {
+	return podLister(i)
+}
+
+func (l podLister) List(selector labels.Selector) (ret []*corev1.Pod, err error) {
+	globalInformer, ok := l[""]
+	if !ok {
+		return nil, fmt.Errorf("combinedLister does not support cross namespace list")
+	}
+
+	return globalInformer.Core().V1().Pods().Lister().List(selector)
+}
+
+func (l podLister) Pods(namespace string) corev1listers.PodNamespaceLister {
+	informer, ok := l[namespace]
+	if !ok {
+		// coding error
+		panic(fmt.Sprintf("namespace %q is missing", namespace))
+	}
+
+	return informer.Core().V1().Pods().Lister().Pods(namespace)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go
new file mode 100644
index 0000000000..f5d60d9cff
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go
@@ -0,0 +1,41 @@
+package v1helpers
+
+import (
+	operatorv1 "github.com/openshift/api/operator/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/cache"
+)
+
+type OperatorClient interface {
+	Informer() cache.SharedIndexInformer
+	// GetObjectMeta returns the operator metadata.
+	GetObjectMeta() (meta *metav1.ObjectMeta, err error)
+	// GetOperatorState returns the operator spec, status and the resource version, potentially from a lister.
+	GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error)
+	// UpdateOperatorSpec updates the spec of the operator, assuming the given resource version.
+	UpdateOperatorSpec(oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error)
+	// UpdateOperatorStatus updates the status of the operator, assuming the given resource version.
+	UpdateOperatorStatus(oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error)
+}
+
+type StaticPodOperatorClient interface {
+	OperatorClient
+	// GetStaticPodOperatorState returns the static pod operator spec, status and the resource version,
+	// potentially from a lister.
+	GetStaticPodOperatorState() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error)
+	// GetStaticPodOperatorStateWithQuorum returns the static pod operator spec, status and resource version
+	// directly from a server read.
+	GetStaticPodOperatorStateWithQuorum() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error)
+	// UpdateStaticPodOperatorStatus updates the status, assuming the given resource version.
+	UpdateStaticPodOperatorStatus(resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error)
+	// UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version.
+	UpdateStaticPodOperatorSpec(resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error)
+}
+
+type OperatorClientWithFinalizers interface {
+	OperatorClient
+	// EnsureFinalizer adds a new finalizer to the operator CR, if it does not exist. No-op otherwise.
+	EnsureFinalizer(finalizer string) error
+	// RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise.
+	RemoveFinalizer(finalizer string) error
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go
new file mode 100644
index 0000000000..4c3a604c75
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go
@@ -0,0 +1,288 @@
+package v1helpers
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+	"k8s.io/client-go/tools/cache"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests.
+func NewFakeSharedIndexInformer() cache.SharedIndexInformer {
+	return &fakeSharedIndexInformer{}
+}
+
+type fakeSharedIndexInformer struct{}
+
+func (fakeSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) {
+}
+
+func (fakeSharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) {
+}
+
+func (fakeSharedIndexInformer) GetStore() cache.Store {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) GetController() cache.Controller {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) Run(stopCh <-chan struct{}) {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) HasSynced() bool {
+	return true
+}
+
+func (fakeSharedIndexInformer) LastSyncResourceVersion() string {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) GetIndexer() cache.Indexer {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) SetWatchErrorHandler(handler cache.WatchErrorHandler) error {
+	panic("implement me")
+}
+
+// NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests.
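Illustrative sketch (not part of the vendored patch): wiring up KubeInformersForNamespaces from informers.go. The empty namespace registers a cluster-wide factory, which is what the cross-namespace List path requires; namespaced listers panic for namespaces that were never registered.

    package main

    import (
        "github.com/openshift/library-go/pkg/operator/v1helpers"
        "k8s.io/client-go/kubernetes/fake"
    )

    func main() {
        kubeClient := fake.NewSimpleClientset()
        nsInformers := v1helpers.NewKubeInformersForNamespaces(kubeClient, "", "openshift-monitoring")

        stopCh := make(chan struct{})
        defer close(stopCh)
        nsInformers.Start(stopCh)

        // Cross-namespace listing works only because "" was registered above.
        _ = nsInformers.ConfigMapLister()
        // Namespaced access goes through the per-namespace factory.
        _ = nsInformers.ConfigMapLister().ConfigMaps("openshift-monitoring")
    }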
+func NewFakeStaticPodOperatorClient( + staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, + triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { + return &fakeStaticPodOperatorClient{ + fakeStaticPodOperatorSpec: staticPodSpec, + fakeStaticPodOperatorStatus: staticPodStatus, + resourceVersion: "0", + triggerStatusUpdateError: triggerStatusErr, + triggerSpecUpdateError: triggerSpecErr, + } +} + +type fakeStaticPodOperatorClient struct { + fakeStaticPodOperatorSpec *operatorv1.StaticPodOperatorSpec + fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error + triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error +} + +func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} + +} +func (c *fakeStaticPodOperatorClient) GetObjectMeta() (*metav1.ObjectMeta, error) { + panic("not supported") +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus = status + return c.fakeStaticPodOperatorStatus, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, "", err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerSpecUpdateError != nil { + if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil { + return nil, "", err + } + } + c.fakeStaticPodOperatorSpec = spec + return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, c.resourceVersion, nil +} +func (c *fakeStaticPodOperatorClient) 
UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { + panic("not supported") +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + staticPodStatus := c.fakeStaticPodOperatorStatus.DeepCopy() + staticPodStatus.OperatorStatus = *status + if err := c.triggerStatusUpdateError(resourceVersion, staticPodStatus); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus.OperatorStatus = *status + return &c.fakeStaticPodOperatorStatus.OperatorStatus, nil +} + +// NewFakeNodeLister returns a fake node lister suitable to use in node controller unit test +func NewFakeNodeLister(client kubernetes.Interface) corev1listers.NodeLister { + return &fakeNodeLister{client: client} +} + +type fakeNodeLister struct { + client kubernetes.Interface +} + +func (n *fakeNodeLister) List(selector labels.Selector) ([]*corev1.Node, error) { + nodes, err := n.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + ret := []*corev1.Node{} + for i := range nodes.Items { + ret = append(ret, &nodes.Items[i]) + } + return ret, nil +} + +func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) { + panic("implement me") +} + +// NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
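Illustrative sketch (not part of the vendored patch): the fake static pod client starts at resourceVersion "0" and increments it on every accepted update, which is how tests exercise conflict behaviour.

    package main

    import (
        "fmt"

        operatorv1 "github.com/openshift/api/operator/v1"
        "github.com/openshift/library-go/pkg/operator/v1helpers"
    )

    func main() {
        client := v1helpers.NewFakeStaticPodOperatorClient(
            &operatorv1.StaticPodOperatorSpec{},
            &operatorv1.StaticPodOperatorStatus{},
            nil, nil, // no injected status/spec update errors
        )
        _, status, rv, _ := client.GetStaticPodOperatorState()
        _, err := client.UpdateStaticPodOperatorStatus(rv, status.DeepCopy())
        fmt.Println(err) // <nil>; a stale resourceVersion would yield a Conflict error
    }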
+func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { + return NewFakeOperatorClientWithObjectMeta(nil, spec, status, triggerErr) +} + +func NewFakeOperatorClientWithObjectMeta(meta *metav1.ObjectMeta, spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { + return &fakeOperatorClient{ + fakeOperatorSpec: spec, + fakeOperatorStatus: status, + fakeObjectMeta: meta, + resourceVersion: "0", + triggerStatusUpdateError: triggerErr, + } +} + +type fakeOperatorClient struct { + fakeOperatorSpec *operatorv1.OperatorSpec + fakeOperatorStatus *operatorv1.OperatorStatus + fakeObjectMeta *metav1.ObjectMeta + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error +} + +func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeOperatorClient) GetObjectMeta() (*metav1.ObjectMeta, error) { + if c.fakeObjectMeta == nil { + return &metav1.ObjectMeta{}, nil + } + + return c.fakeObjectMeta, nil +} + +func (c *fakeOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return c.fakeOperatorSpec, c.fakeOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeOperatorStatus = status + return c.fakeOperatorStatus, nil +} + +func (c *fakeOperatorClient) UpdateOperatorSpec(resourceVersion string, spec *operatorv1.OperatorSpec) (*operatorv1.OperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, c.resourceVersion, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, c.resourceVersion, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + c.fakeOperatorSpec = spec + return c.fakeOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) EnsureFinalizer(finalizer string) error { + if c.fakeObjectMeta == nil { + c.fakeObjectMeta = &metav1.ObjectMeta{} + } + for _, f := range c.fakeObjectMeta.Finalizers { + if f == finalizer { + return nil + } + } + c.fakeObjectMeta.Finalizers = append(c.fakeObjectMeta.Finalizers, finalizer) + return nil +} + +func (c *fakeOperatorClient) RemoveFinalizer(finalizer string) error { + newFinalizers := []string{} + for _, f := range c.fakeObjectMeta.Finalizers { + if f == finalizer { + continue + } + newFinalizers = append(newFinalizers, f) + } + c.fakeObjectMeta.Finalizers = newFinalizers + return nil +} + +func (c *fakeOperatorClient) SetObjectMeta(meta *metav1.ObjectMeta) { + c.fakeObjectMeta = meta +} diff --git 
a/vendor/github.com/robfig/cron/.gitignore b/vendor/github.com/robfig/cron/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/robfig/cron/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/robfig/cron/.travis.yml b/vendor/github.com/robfig/cron/.travis.yml new file mode 100644 index 0000000000..4f2ee4d973 --- /dev/null +++ b/vendor/github.com/robfig/cron/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/robfig/cron/LICENSE b/vendor/github.com/robfig/cron/LICENSE new file mode 100644 index 0000000000..3a0f627ffe --- /dev/null +++ b/vendor/github.com/robfig/cron/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/README.md b/vendor/github.com/robfig/cron/README.md new file mode 100644 index 0000000000..ec40c95fcb --- /dev/null +++ b/vendor/github.com/robfig/cron/README.md @@ -0,0 +1,6 @@ +[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) +[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) + +# cron + +Documentation here: https://godoc.org/github.com/robfig/cron diff --git a/vendor/github.com/robfig/cron/constantdelay.go b/vendor/github.com/robfig/cron/constantdelay.go new file mode 100644 index 0000000000..cd6e7b1be9 --- /dev/null +++ b/vendor/github.com/robfig/cron/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. 
+// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/cron.go b/vendor/github.com/robfig/cron/cron.go new file mode 100644 index 0000000000..2318aeb2e7 --- /dev/null +++ b/vendor/github.com/robfig/cron/cron.go @@ -0,0 +1,259 @@ +package cron + +import ( + "log" + "runtime" + "sort" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + stop chan struct{} + add chan *Entry + snapshot chan []*Entry + running bool + ErrorLog *log.Logger + location *time.Location +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// The Schedule describes a job's duty cycle. +type Schedule interface { + // Return the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// Entry consists of a schedule and the func to execute on that schedule. +type Entry struct { + // The schedule on which this job should be run. + Schedule Schedule + + // The next time the job will run. This is the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // The last time this job was run. This is the zero time if the job has never + // been run. + Prev time.Time + + // The Job to run. + Job Job +} + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, in the Local time zone. +func New() *Cron { + return NewWithLocation(time.Now().Location()) +} + +// NewWithLocation returns a new Cron job runner. +func NewWithLocation(location *time.Location) *Cron { + return &Cron{ + entries: nil, + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan []*Entry), + running: false, + ErrorLog: nil, + location: location, + } +} + +// A wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +func (c *Cron) AddFunc(spec string, cmd func()) error { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +func (c *Cron) AddJob(spec string, cmd Job) error { + schedule, err := Parse(spec) + if err != nil { + return err + } + c.Schedule(schedule, cmd) + return nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +func (c *Cron) Schedule(schedule Schedule, cmd Job) { + entry := &Entry{ + Schedule: schedule, + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + return + } + + c.add <- entry +} + +// Entries returns a snapshot of the cron entries. 
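Illustrative sketch (not part of the vendored patch): scheduling a fixed-delay job directly with Schedule, using the ConstantDelaySchedule from constantdelay.go and the FuncJob adapter from cron.go.

    package main

    import (
        "fmt"
        "time"

        "github.com/robfig/cron"
    )

    func main() {
        c := cron.New()
        // Every floors at one second and truncates sub-second precision.
        c.Schedule(cron.Every(5*time.Minute), cron.FuncJob(func() { fmt.Println("tick") }))
        c.Start()
        defer c.Stop()
        time.Sleep(time.Second) // let the scheduler goroutine spin up
    }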
+func (c *Cron) Entries() []*Entry { + if c.running { + c.snapshot <- nil + x := <-c.snapshot + return x + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Start the cron scheduler in its own go-routine, or no-op if already started. +func (c *Cron) Start() { + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + if c.running { + return + } + c.running = true + c.run() +} + +func (c *Cron) runWithRecovery(j Job) { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + c.logf("cron: panic running job: %v\n%s", r, buf) + } + }() + j.Run() +} + +// Run the scheduler. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + // Figure out the next activation times for each entry. + now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + go c.runWithRecovery(e.Job) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + + case <-c.snapshot: + c.snapshot <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + return + } + + break + } + } +} + +// Logs an error to stderr or to the configured error log +func (c *Cron) logf(format string, args ...interface{}) { + if c.ErrorLog != nil { + c.ErrorLog.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +func (c *Cron) Stop() { + if !c.running { + return + } + c.stop <- struct{}{} + c.running = false +} + +// entrySnapshot returns a copy of the current cron entry list. +func (c *Cron) entrySnapshot() []*Entry { + entries := []*Entry{} + for _, e := range c.entries { + entries = append(entries, &Entry{ + Schedule: e.Schedule, + Next: e.Next, + Prev: e.Prev, + Job: e.Job, + }) + } + return entries +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} diff --git a/vendor/github.com/robfig/cron/doc.go b/vendor/github.com/robfig/cron/doc.go new file mode 100644 index 0000000000..d02ec2f3b5 --- /dev/null +++ b/vendor/github.com/robfig/cron/doc.go @@ -0,0 +1,129 @@ +/* +Package cron implements a cron spec parser and job runner. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. 
+
+	c := cron.New()
+	c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") })
+	c.AddFunc("@hourly", func() { fmt.Println("Every hour") })
+	c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") })
+	c.Start()
+	..
+	// Funcs are invoked in their own goroutine, asynchronously.
+	...
+	// Funcs may also be added to a running Cron
+	c.AddFunc("@daily", func() { fmt.Println("Every day") })
+	..
+	// Inspect the cron job entries' next and previous run times.
+	inspect(c.Entries())
+	..
+	c.Stop() // Stop the scheduler (does not stop any jobs already running).
+
+CRON Expression Format
+
+A cron expression represents a set of times, using 6 space-separated fields.
+
+	Field name   | Mandatory? | Allowed values  | Allowed special characters
+	----------   | ---------- | --------------  | --------------------------
+	Seconds      | Yes        | 0-59            | * / , -
+	Minutes      | Yes        | 0-59            | * / , -
+	Hours        | Yes        | 0-23            | * / , -
+	Day of month | Yes        | 1-31            | * / , - ?
+	Month        | Yes        | 1-12 or JAN-DEC | * / , -
+	Day of week  | Yes        | 0-6 or SUN-SAT  | * / , - ?
+
+Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun",
+and "sun" are equally accepted.
+
+Special Characters
+
+Asterisk ( * )
+
+The asterisk indicates that the cron expression will match for all values of the
+field; e.g., using an asterisk in the 5th field (month) would indicate every
+month.
+
+Slash ( / )
+
+Slashes are used to describe increments of ranges. For example 3-59/15 in the
+1st field (minutes) would indicate the 3rd minute of the hour and every 15
+minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
+that is, an increment over the largest possible range of the field. The form
+"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
+increment until the end of that specific range. It does not wrap around.
+
+Comma ( , )
+
+Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
+the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
+
+Hyphen ( - )
+
+Hyphens are used to define ranges. For example, 9-17 would indicate every
+hour between 9am and 5pm inclusive.
+
+Question mark ( ? )
+
+Question mark may be used instead of '*' for leaving either day-of-month or
+day-of-week blank.
+
+Predefined schedules
+
+You may use one of several pre-defined schedules in place of a cron expression.
+
+	Entry                  | Description                                | Equivalent To
+	-----                  | -----------                                | -------------
+	@yearly (or @annually) | Run once a year, midnight, Jan. 1st        | 0 0 0 1 1 *
+	@monthly               | Run once a month, midnight, first of month | 0 0 0 1 * *
+	@weekly                | Run once a week, midnight between Sat/Sun  | 0 0 0 * * 0
+	@daily (or @midnight)  | Run once a day, midnight                   | 0 0 0 * * *
+	@hourly                | Run once an hour, beginning of hour        | 0 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added
+or cron is run. This is supported by formatting the cron spec like this:
+
+	@every <duration>
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after
+1 hour, 30 minutes, 10 seconds, and then every interval after that.
+
+Note: The interval does not take the job runtime into account. For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
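Illustrative sketch (not part of the vendored patch): making the interval arithmetic concrete with the package-level Parse defined in parser.go.

	sched, err := cron.Parse("@every 1h30m10s")
	if err != nil {
		panic(err)
	}
	now := time.Now()
	// First activation roughly 1h30m10s from now; subsequent ones every
	// interval after that, regardless of how long the job itself runs.
	fmt.Println(sched.Next(now))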
+
+Time zones
+
+All interpretation and scheduling is done in the machine's local time zone (as
+provided by the Go time package, http://www.golang.org/pkg/time).
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Thread safety
+
+Since the Cron service runs concurrently with the calling code, some amount of
+care must be taken to ensure proper synchronization.
+
+All cron methods are designed to be correctly synchronized as long as the caller
+ensures that invocations have a clear happens-before ordering between them.
+
+Implementation
+
+Cron entries are stored in an array, sorted by their next activation time. Cron
+sleeps until the next job is due to be run.
+
+Upon waking:
+ - it runs each entry that is active on that second
+ - it calculates the next run times for the jobs that were run
+ - it re-sorts the array of entries by next activation time.
+ - it goes to sleep until the soonest job.
+*/
+package cron
diff --git a/vendor/github.com/robfig/cron/parser.go b/vendor/github.com/robfig/cron/parser.go
new file mode 100644
index 0000000000..a5e83c0a8d
--- /dev/null
+++ b/vendor/github.com/robfig/cron/parser.go
@@ -0,0 +1,380 @@
+package cron
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ParseOption holds configuration options for creating a parser. Most options
+// specify which fields should be included, while others enable features. If a
+// field is not included the parser will assume a default value. These options
+// do not change the order the fields are parsed in.
+type ParseOption int
+
+const (
+	Second      ParseOption = 1 << iota // Seconds field, default 0
+	Minute                              // Minutes field, default 0
+	Hour                                // Hours field, default 0
+	Dom                                 // Day of month field, default *
+	Month                               // Month field, default *
+	Dow                                 // Day of week field, default *
+	DowOptional                         // Optional day of week field, default *
+	Descriptor                          // Allow descriptors such as @monthly, @weekly, etc.
+)
+
+var places = []ParseOption{
+	Second,
+	Minute,
+	Hour,
+	Dom,
+	Month,
+	Dow,
+}
+
+var defaults = []string{
+	"0",
+	"0",
+	"0",
+	"*",
+	"*",
+	"*",
+}
+
+// Parser is a custom parser that can be configured.
+type Parser struct {
+	options   ParseOption
+	optionals int
+}
+
+// NewParser creates a custom Parser with custom options.
+//
+//	// Standard parser without descriptors
+//	specParser := NewParser(Minute | Hour | Dom | Month | Dow)
+//	sched, err := specParser.Parse("0 0 15 */3 *")
+//
+//	// Same as above, just excludes time fields
+//	subsParser := NewParser(Dom | Month | Dow)
+//	sched, err := subsParser.Parse("15 */3 *")
+//
+//	// Same as above, just makes Dow optional
+//	subsParser := NewParser(Dom | Month | DowOptional)
+//	sched, err := subsParser.Parse("15 */3")
+//
+func NewParser(options ParseOption) Parser {
+	optionals := 0
+	if options&DowOptional > 0 {
+		options |= Dow
+		optionals++
+	}
+	return Parser{options, optionals}
+}
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+// It accepts crontab specs and features configured by NewParser.
+func (p Parser) Parse(spec string) (Schedule, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("Empty spec string") + } + if spec[0] == '@' && p.options&Descriptor > 0 { + return parseDescriptor(spec) + } + + // Figure out how many fields we need + max := 0 + for _, place := range places { + if p.options&place > 0 { + max++ + } + } + min := max - p.optionals + + // Split fields on whitespace + fields := strings.Fields(spec) + + // Validate number of fields + if count := len(fields); count < min || count > max { + if min == max { + return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec) + } + return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec) + } + + // Fill in missing fields + fields = expandFields(fields, p.options) + + var err error + field := func(field string, r bounds) uint64 { + if err != nil { + return 0 + } + var bits uint64 + bits, err = getField(field, r) + return bits + } + + var ( + second = field(fields[0], seconds) + minute = field(fields[1], minutes) + hour = field(fields[2], hours) + dayofmonth = field(fields[3], dom) + month = field(fields[4], months) + dayofweek = field(fields[5], dow) + ) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + }, nil +} + +func expandFields(fields []string, options ParseOption) []string { + n := 0 + count := len(fields) + expFields := make([]string, len(places)) + copy(expFields, defaults) + for i, place := range places { + if options&place > 0 { + expFields[i] = fields[n] + n++ + } + if n == count { + break + } + } + return expFields +} + +var standardParser = NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, +) + +// ParseStandard returns a new crontab schedule representing the given standardSpec +// (https://en.wikipedia.org/wiki/Cron). It differs from Parse requiring to always +// pass 5 entries representing: minute, hour, day of month, month and day of week, +// in that order. It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Standard crontab specs, e.g. "* * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func ParseStandard(standardSpec string) (Schedule, error) { + return standardParser.Parse(standardSpec) +} + +var defaultParser = NewParser( + Second | Minute | Hour | Dom | Month | DowOptional | Descriptor, +) + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Full crontab specs, e.g. "* * * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func Parse(spec string) (Schedule, error) { + return defaultParser.Parse(spec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". +func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. 
+func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("Too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + default: + return 0, fmt.Errorf("Too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("Step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
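Illustrative sketch (not part of the vendored patch): because getBits and all are unexported, this would have to live inside package cron as a test. It shows the bit layout that getRange/getBits produce for the expression "0-59/15".

	package cron

	import (
		"fmt"
		"testing"
	)

	func TestGetBitsLayout(t *testing.T) {
		bits := getBits(0, 59, 15) // same mask getRange builds for "0-59/15"
		var set []uint
		for i := uint(0); i < 60; i++ {
			if bits&(1<<i) != 0 {
				set = append(set, i)
			}
		}
		fmt.Println(set) // [0 15 30 45]
	}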
+func parseDescriptor(descriptor string) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/spec.go b/vendor/github.com/robfig/cron/spec.go new file mode 100644 index 0000000000..aac9a60b95 --- /dev/null +++ b/vendor/github.com/robfig/cron/spec.go @@ -0,0 +1,158 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. +type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach: + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Start at the earliest possible time (the upcoming second). + t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. + added := false + + // If no time is found within five years, return zero. 
+	yearLimit := t.Year() + 5
+
+WRAP:
+	if t.Year() > yearLimit {
+		return time.Time{}
+	}
+
+	// Find the first applicable month.
+	// If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 0, 1)
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
new file mode 100644
index 0000000000..9f20152e45
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package validation contains generic api type validation functions.
+package validation // import "k8s.io/apimachinery/pkg/api/validation"
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
new file mode 100644
index 0000000000..947c96f434
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// IsNegativeErrorMsg is an error message for values that must be greater than or equal to 0.
+const IsNegativeErrorMsg string = `must be greater than or equal to 0`
+
+// ValidateNameFunc validates that the provided name is valid for a given resource type.
+// Not all resources have the same validation rules for names. Prefix is true
+// if the name will have a value appended to it. If the name is not valid,
+// this returns a list of descriptions of individual characteristics of the
+// value that were not valid. Otherwise this returns an empty list or nil.
+type ValidateNameFunc func(name string, prefix bool) []string
+
+// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain.
+func NameIsDNSSubdomain(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1123Subdomain(name)
+}
+
+// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label.
+func NameIsDNSLabel(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1123Label(name)
+}
+
+// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label.
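+// Such a name must start with a lowercase letter, may contain lowercase
+// alphanumerics and '-', and must end with an alphanumeric (e.g. "my-name").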
+func NameIsDNS1035Label(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1035Label(name)
+}
+
+// ValidateNamespaceName can be used to check whether the given namespace name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateNamespaceName = NameIsDNSLabel
+
+// ValidateServiceAccountName can be used to check whether the given service account name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateServiceAccountName = NameIsDNSSubdomain
+
+// maskTrailingDash replaces the final character of a string with a subdomain safe
+// value if it is a dash.
+func maskTrailingDash(name string) string {
+	if strings.HasSuffix(name, "-") {
+		return name[:len(name)-2] + "a"
+	}
+	return name
+}
+
+// ValidateNonnegativeField validates that the given value is not negative.
+func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if value < 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg))
+	}
+	return allErrs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
new file mode 100644
index 0000000000..3e234eb11d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
@@ -0,0 +1,263 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"strings"
+
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// FieldImmutableErrorMsg is an error message for fields that are immutable.
+const FieldImmutableErrorMsg string = `field is immutable`
+
+const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB
+
+// BannedOwners is a blacklist of objects that are not allowed to be owners.
+var BannedOwners = map[schema.GroupVersionKind]struct{}{
+	{Group: "", Version: "v1", Kind: "Event"}: {},
+}
+
+// ValidateClusterName can be used to check whether the given cluster name is valid.
+var ValidateClusterName = NameIsDNS1035Label
+
+// ValidateAnnotations validates that a set of annotations are correctly defined.
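+// Keys must be qualified names, and the combined size of all keys and values
+// must not exceed 256 kB.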
+func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	var totalSize int64
+	for k, v := range annotations {
+		for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
+			allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
+		}
+		totalSize += (int64)(len(k)) + (int64)(len(v))
+	}
+	if totalSize > (int64)(totalAnnotationSizeLimitB) {
+		allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB))
+	}
+	return allErrs
+}
+
+func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	gvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind)
+	// gvk.Group is empty for the legacy group.
+	if len(gvk.Version) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty"))
+	}
+	if len(gvk.Kind) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty"))
+	}
+	if len(ownerReference.Name) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty"))
+	}
+	if len(ownerReference.UID) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty"))
+	}
+	if _, ok := BannedOwners[gvk]; ok {
+		allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk)))
+	}
+	return allErrs
+}
+
+// ValidateOwnerReferences validates that a set of owner references are correctly defined.
+func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	controllerName := ""
+	for _, ref := range ownerReferences {
+		allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...)
+		if ref.Controller != nil && *ref.Controller {
+			if controllerName != "" {
+				allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences,
+					fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name)))
+			} else {
+				controllerName = ref.Name
+			}
+		}
+	}
+	return allErrs
+}
+
+// ValidateFinalizerName validates finalizer names.
+func ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, msg := range validation.IsQualifiedName(stringValue) {
+		allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg))
+	}
+
+	return allErrs
+}
+
+// ValidateNoNewFinalizers validates that newFinalizers contains no finalizers
+// that were not already present in oldFinalizers.
+func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...))
+	if len(extra) != 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List())))
+	}
+	return allErrs
+}
+
+// ValidateImmutableField validates that the new value and the old value are deeply equal.
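+// It is used below to reject updates to fields such as name, namespace, and
+// uid, which must never change after creation.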
+func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if !apiequality.Semantic.DeepEqual(oldVal, newVal) {
+		allErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg))
+	}
+	return allErrs
+}
+
+// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
+// been performed.
+// It doesn't return an error for root-scoped resources that have a namespace set,
+// because the namespace should already have been cleared.
+func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+	metadata, err := meta.Accessor(objMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error()))
+		return allErrs
+	}
+	return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath)
+}
+
+// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation has already
+// been performed.
+// It doesn't return an error for root-scoped resources that have a namespace set,
+// because the namespace should already have been cleared.
+func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(meta.GetGenerateName()) != 0 {
+		for _, msg := range nameFn(meta.GetGenerateName(), true) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GetGenerateName(), msg))
+		}
+	}
+	// If the generated name validates, but the calculated value does not, it's a problem with generation, and we
+	// report it here. This may confuse users, but indicates a programming bug and still must be validated.
+	// If there are multiple fields of which one is required, add an "or" as a separator.
+	if len(meta.GetName()) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required"))
+	} else {
+		for _, msg := range nameFn(meta.GetName(), false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg))
+		}
+	}
+	if requiresNamespace {
+		if len(meta.GetNamespace()) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
+		} else {
+			for _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.GetNamespace(), msg))
+			}
+		}
+	} else {
+		if len(meta.GetNamespace()) != 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type"))
+		}
+	}
+	if len(meta.GetClusterName()) != 0 {
+		for _, msg := range ValidateClusterName(meta.GetClusterName(), false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterName"), meta.GetClusterName(), msg))
+		}
+	}
+
+	allErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child("generation"))...)
+	allErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child("labels"))...)
+	allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...)
+	allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...)
+	allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...)
+	allErrs = append(allErrs, v1validation.ValidateManagedFields(meta.GetManagedFields(), fldPath.Child("managedFields"))...)
+	return allErrs
+}
+
+// ValidateFinalizers tests whether the finalizer names are valid, and whether
+// there are conflicting finalizers.
+func ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	hasFinalizerOrphanDependents := false
+	hasFinalizerDeleteDependents := false
+	for _, finalizer := range finalizers {
+		allErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...)
+		if finalizer == metav1.FinalizerOrphanDependents {
+			hasFinalizerOrphanDependents = true
+		}
+		if finalizer == metav1.FinalizerDeleteDependents {
+			hasFinalizerDeleteDependents = true
+		}
+	}
+	if hasFinalizerDeleteDependents && hasFinalizerOrphanDependents {
+		allErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf("finalizer %s and %s cannot be both set", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents)))
+	}
+	return allErrs
+}
+
+// ValidateObjectMetaUpdate validates an object's metadata when updated.
+func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
+	newMetadata, err := meta.Accessor(newMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error()))
+		return allErrs
+	}
+	oldMetadata, err := meta.Accessor(oldMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error()))
+		return allErrs
+	}
+	return ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath)
+}
+
+// ValidateObjectMetaAccessorUpdate validates an object's metadata when updated.
+func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+
+	// Finalizers cannot be added if the object is already being deleted.
+	if oldMeta.GetDeletionTimestamp() != nil {
+		allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child("finalizers"))...)
+	}
+
+	// Reject updates that don't specify a resource version
+	if len(newMeta.GetResourceVersion()) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.GetResourceVersion(), "must be specified for an update"))
+	}
+
+	// Generation shouldn't be decremented
+	if newMeta.GetGeneration() < oldMeta.GetGeneration() {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.GetGeneration(), "must not be decremented"))
+	}
+
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child("name"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child("namespace"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child("uid"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child("creationTimestamp"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionTimestamp(), oldMeta.GetDeletionTimestamp(), fldPath.Child("deletionTimestamp"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionGracePeriodSeconds(), oldMeta.GetDeletionGracePeriodSeconds(), fldPath.Child("deletionGracePeriodSeconds"))...)
+ allErrs = append(allErrs, ValidateImmutableField(newMeta.GetClusterName(), oldMeta.GetClusterName(), fldPath.Child("clusterName"))...) + + allErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(newMeta.GetManagedFields(), fldPath.Child("managedFields"))...) + + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go new file mode 100644 index 0000000000..a5a7f144ad --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -0,0 +1,264 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "regexp" + "unicode" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func ValidateLabelSelector(ps *metav1.LabelSelector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ps == nil { + return allErrs + } + allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) + for i, expr := range ps.MatchExpressions { + allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) + } + return allErrs +} + +func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch sr.Operator { + case metav1.LabelSelectorOpIn, metav1.LabelSelectorOpNotIn: + if len(sr.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.LabelSelectorOpExists, metav1.LabelSelectorOpDoesNotExist: + if len(sr.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } + allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + return allErrs +} + +// ValidateLabelName validates that the label name is correctly defined. +func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(labelName) { + allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) + } + return allErrs +} + +// ValidateLabels validates that a set of labels are correctly defined. 
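+// Label keys must be qualified names; label values must be at most 63
+// characters, consist of alphanumerics, '-', '_' or '.', and start and end
+// with an alphanumeric character (or be empty).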
+func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for k, v := range labels {
+		allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
+		for _, msg := range validation.IsValidLabelValue(v) {
+			allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
+		}
+	}
+	return allErrs
+}
+
+func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if options.OrphanDependents != nil && options.PropagationPolicy != nil {
+		allErrs = append(allErrs, field.Invalid(field.NewPath("propagationPolicy"), options.PropagationPolicy, "orphanDependents and deletionPropagation cannot be both set"))
+	}
+	if options.PropagationPolicy != nil &&
+		*options.PropagationPolicy != metav1.DeletePropagationForeground &&
+		*options.PropagationPolicy != metav1.DeletePropagationBackground &&
+		*options.PropagationPolicy != metav1.DeletePropagationOrphan {
+		allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"}))
+	}
+	allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
+	return allErrs
+}
+
+func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList {
+	return append(
+		ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")),
+		ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...,
+	)
+}
+
+func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList {
+	return append(
+		ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")),
+		ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...,
+	)
+}
+
+func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if patchType != types.ApplyPatchType {
+		if options.Force != nil {
+			allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch"))
+		}
+	} else {
+		if options.FieldManager == "" {
+			// This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers.
+			allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch"))
+		}
+	}
+	allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...)
+	allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
+	return allErrs
+}
+
+var FieldManagerMaxLength = 128
+
+// ValidateFieldManager validates that the fieldManager is of the proper length and
+// only has printable characters.
+func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	// The field cannot be set as a `*string`, so an empty string ("") is
+	// considered not set and is defaulted by the rest of the process
+	// (unless apply is used, in which case it is required).
+	if len(fieldManager) > FieldManagerMaxLength {
+		allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength))
+	}
+	// Verify that all characters are printable.
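+	// (unicode.IsPrint accepts letters, marks, numbers, punctuation, symbols
+	// and the ASCII space character.)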
+ for i, r := range fieldManager { + if !unicode.IsPrint(r) { + allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i))) + } + } + + return allErrs +} + +var allowedDryRunValues = sets.NewString(metav1.DryRunAll) + +// ValidateDryRun validates that a dryRun query param only contains allowed values. +func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { + allErrs := field.ErrorList{} + if !allowedDryRunValues.HasAll(dryRun...) { + allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List())) + } + return allErrs +} + +const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized` + +// ValidateTableOptions returns any invalid flags on TableOptions. +func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { + var allErrs field.ErrorList + switch opts.IncludeObject { + case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": + default: + allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) + } + return allErrs +} + +func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for i, fields := range fieldsList { + fldPath := fldPath.Index(i) + switch fields.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`")) + } + if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`")) + } + allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...) + } + return allErrs +} + +func ValidateConditions(conditions []metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + conditionTypeToFirstIndex := map[string]int{} + for i, condition := range conditions { + if _, ok := conditionTypeToFirstIndex[condition.Type]; ok { + allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("type"), condition.Type)) + } else { + conditionTypeToFirstIndex[condition.Type] = i + } + + allErrs = append(allErrs, ValidateCondition(condition, fldPath.Index(i))...) + } + + return allErrs +} + +// validConditionStatuses is used internally to check validity and provide a good message +var validConditionStatuses = sets.NewString(string(metav1.ConditionTrue), string(metav1.ConditionFalse), string(metav1.ConditionUnknown)) + +const ( + maxReasonLen = 1 * 1024 + maxMessageLen = 32 * 1024 +) + +func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // type is set and is a valid format + allErrs = append(allErrs, ValidateLabelName(condition.Type, fldPath.Child("type"))...) 
+ + // status is set and is an accepted value + if !validConditionStatuses.Has(string(condition.Status)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("status"), condition.Status, validConditionStatuses.List())) + } + + if condition.ObservedGeneration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("observedGeneration"), condition.ObservedGeneration, "must be greater than or equal to zero")) + } + + if condition.LastTransitionTime.IsZero() { + allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), "must be set")) + } + + if len(condition.Reason) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("reason"), "must be set")) + } else { + for _, currErr := range isValidConditionReason(condition.Reason) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr)) + } + if len(condition.Reason) > maxReasonLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen)) + } + } + + if len(condition.Message) > maxMessageLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen)) + } + + return allErrs +} + +const conditionReasonFmt string = "[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?" +const conditionReasonErrMsg string = "a condition reason must start with alphabetic character, optionally followed by a string of alphanumeric characters or '_,:', and must end with an alphanumeric character or '_'" + +var conditionReasonRegexp = regexp.MustCompile("^" + conditionReasonFmt + "$") + +// isValidConditionReason tests for a string that conforms to rules for condition reasons. This checks the format, but not the length. +func isValidConditionReason(value string) []string { + if !conditionReasonRegexp.MatchString(value) { + return []string{validation.RegexError(conditionReasonErrMsg, conditionReasonFmt, "my_name", "MY_NAME", "MyName", "ReasonA,ReasonB", "ReasonA:ReasonB")} + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go new file mode 100644 index 0000000000..f0dc076763 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go @@ -0,0 +1,183 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serviceaccount + +import ( + "context" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/user" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + + "k8s.io/klog/v2" +) + +const ( + ServiceAccountUsernamePrefix = "system:serviceaccount:" + ServiceAccountUsernameSeparator = ":" + ServiceAccountGroupPrefix = "system:serviceaccounts:" + AllServiceAccountsGroup = "system:serviceaccounts" + // PodNameKey is the key used in a user's "extra" to specify the pod name of + // the authenticating request. + PodNameKey = "authentication.kubernetes.io/pod-name" + // PodUIDKey is the key used in a user's "extra" to specify the pod UID of + // the authenticating request. + PodUIDKey = "authentication.kubernetes.io/pod-uid" +) + +// MakeUsername generates a username from the given namespace and ServiceAccount name. +// The resulting username can be passed to SplitUsername to extract the original namespace and ServiceAccount name. +func MakeUsername(namespace, name string) string { + return ServiceAccountUsernamePrefix + namespace + ServiceAccountUsernameSeparator + name +} + +// MatchesUsername checks whether the provided username matches the namespace and name without +// allocating. Use this when checking a service account namespace and name against a known string. +func MatchesUsername(namespace, name string, username string) bool { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return false + } + username = username[len(ServiceAccountUsernamePrefix):] + + if !strings.HasPrefix(username, namespace) { + return false + } + username = username[len(namespace):] + + if !strings.HasPrefix(username, ServiceAccountUsernameSeparator) { + return false + } + username = username[len(ServiceAccountUsernameSeparator):] + + return username == name +} + +var invalidUsernameErr = fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name")) + +// SplitUsername returns the namespace and ServiceAccount name embedded in the given username, +// or an error if the username is not a valid name produced by MakeUsername +func SplitUsername(username string) (string, string, error) { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return "", "", invalidUsernameErr + } + trimmed := strings.TrimPrefix(username, ServiceAccountUsernamePrefix) + parts := strings.Split(trimmed, ServiceAccountUsernameSeparator) + if len(parts) != 2 { + return "", "", invalidUsernameErr + } + namespace, name := parts[0], parts[1] + if len(apimachineryvalidation.ValidateNamespaceName(namespace, false)) != 0 { + return "", "", invalidUsernameErr + } + if len(apimachineryvalidation.ValidateServiceAccountName(name, false)) != 0 { + return "", "", invalidUsernameErr + } + return namespace, name, nil +} + +// MakeGroupNames generates service account group names for the given namespace +func MakeGroupNames(namespace string) []string { + return []string{ + AllServiceAccountsGroup, + MakeNamespaceGroupName(namespace), + } +} + +// MakeNamespaceGroupName returns the name of the group all service accounts in the namespace are included in +func MakeNamespaceGroupName(namespace string) string { + return ServiceAccountGroupPrefix + namespace +} + +// UserInfo returns a user.Info interface for the given namespace, service account name and UID +func UserInfo(namespace, name, uid string) user.Info 
{ + return (&ServiceAccountInfo{ + Name: name, + Namespace: namespace, + UID: uid, + }).UserInfo() +} + +type ServiceAccountInfo struct { + Name, Namespace, UID string + PodName, PodUID string +} + +func (sa *ServiceAccountInfo) UserInfo() user.Info { + info := &user.DefaultInfo{ + Name: MakeUsername(sa.Namespace, sa.Name), + UID: sa.UID, + Groups: MakeGroupNames(sa.Namespace), + } + if sa.PodName != "" && sa.PodUID != "" { + info.Extra = map[string][]string{ + PodNameKey: {sa.PodName}, + PodUIDKey: {sa.PodUID}, + } + } + return info +} + +// IsServiceAccountToken returns true if the secret is a valid api token for the service account +func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { + if secret.Type != v1.SecretTypeServiceAccountToken { + return false + } + + name := secret.Annotations[v1.ServiceAccountNameKey] + uid := secret.Annotations[v1.ServiceAccountUIDKey] + if name != sa.Name { + // Name must match + return false + } + if len(uid) > 0 && uid != string(sa.UID) { + // If UID is specified, it must match + return false + } + + return true +} + +func GetOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) { + sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + return sa, nil + } + if !apierrors.IsNotFound(err) { + return nil, err + } + + // Create the namespace if we can't verify it exists. + // Tolerate errors, since we don't know whether this component has namespace creation permissions. + if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { + if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { + klog.Warningf("create non-exist namespace %s failed:%v", namespace, err) + } + } + + // Create the service account + sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + // If we're racing to init and someone else already created it, re-fetch + return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + } + return sa, err +} diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100644 index 0000000000..e7e739b150 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- sig-instrumentation-reviewers +approvers: +- sig-instrumentation-approvers diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go new file mode 100644 index 0000000000..33d5fe78ee --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package record has all client logic for recording and reporting +// "k8s.io/api/core/v1".Event events. +package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go new file mode 100644 index 0000000000..30a6660198 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -0,0 +1,383 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + "math/rand" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record/util" + ref "k8s.io/client-go/tools/reference" + "k8s.io/klog/v2" +) + +const maxTriesPerEvent = 12 + +var defaultSleepDuration = 10 * time.Second + +const maxQueuedEvents = 1000 + +// EventSink knows how to store events (client.Client implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// pkg/client's REST client. +type EventSink interface { + Create(event *v1.Event) (*v1.Event, error) + Update(event *v1.Event) (*v1.Event, error) + Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) +} + +// CorrelatorOptions allows you to change the default of the EventSourceObjectSpamFilter +// and EventAggregator in EventCorrelator +type CorrelatorOptions struct { + // The lru cache size used for both EventSourceObjectSpamFilter and the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the LRUCacheSize has to be greater than 0. + LRUCacheSize int + // The burst size used by the token bucket rate filtering in EventSourceObjectSpamFilter + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the BurstSize has to be greater than 0. + BurstSize int + // The fill rate of the token bucket in queries per second in EventSourceObjectSpamFilter + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the QPS has to be greater than 0. 
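+	// (For reference, the default in events_cache.go is 1/300, i.e. one new
+	// event token every five minutes.)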
+ QPS float32 + // The func used by the EventAggregator to group event keys for aggregation + // If not specified (zero value), EventAggregatorByReasonFunc will be used + KeyFunc EventAggregatorKeyFunc + // The func used by the EventAggregator to produced aggregated message + // If not specified (zero value), EventAggregatorByReasonMessageFunc will be used + MessageFunc EventAggregatorMessageFunc + // The number of events in an interval before aggregation happens by the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the MaxEvents has to be greater than 0 + MaxEvents int + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it is considered new by the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the MaxIntervalInSeconds has to be greater than 0 + MaxIntervalInSeconds int + // The clock used by the EventAggregator to allow for testing + // If not specified (zero value), clock.RealClock{} will be used + Clock clock.Clock +} + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Event constructs an event from the given information and puts it in the queue for sending. + // 'object' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'message' is intended to be human readable. + // + // The resulting event will be created in the same namespace as the reference object. + Event(object runtime.Object, eventtype, reason, message string) + + // Eventf is just like Event, but with Sprintf for the message field. + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) + + // AnnotatedEventf is just like eventf, but with annotations attached + AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartEventWatcher starts sending events received from this EventBroadcaster to the given + // event handler function. The return value can be ignored or used to stop recording, if + // desired. + StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface + + // StartRecordingToSink starts sending events received from this EventBroadcaster to the given + // sink. The return value can be ignored or used to stop recording, if desired. + StartRecordingToSink(sink EventSink) watch.Interface + + // StartLogging starts sending events received from this EventBroadcaster to the given logging + // function. The return value can be ignored or used to stop recording, if desired. + StartLogging(logf func(format string, args ...interface{})) watch.Interface + + // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured + // logging function. 
The return value can be ignored or used to stop recording, if desired. + StartStructuredLogging(verbosity klog.Level) watch.Interface + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder + + // Shutdown shuts down the broadcaster + Shutdown() +} + +// EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder +// implementing the new "k8s.io/client-go/tools/events".EventRecorder interface. +type EventRecorderAdapter struct { + recorder EventRecorder +} + +// NewEventRecorderAdapter returns an adapter implementing the new +// "k8s.io/client-go/tools/events".EventRecorder interface. +func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter { + return &EventRecorderAdapter{ + recorder: recorder, + } +} + +// Eventf is a wrapper around v1 Eventf +func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + a.recorder.Eventf(regarding, eventtype, reason, note, args...) +} + +// Creates a new event broadcaster. +func NewBroadcaster() EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: defaultSleepDuration, + } +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: sleepDuration, + } +} + +func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: defaultSleepDuration, + options: options, + } +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + sleepDuration time.Duration + options CorrelatorOptions +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// The return value can be ignored or used to stop recording, if desired. +// TODO: make me an object with parameterizable queue length and retry interval +func (e *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + eventCorrelator := NewEventCorrelatorWithOptions(e.options) + return e.StartEventWatcher( + func(event *v1.Event) { + recordToSink(sink, event, eventCorrelator, e.sleepDuration) + }) +} + +func (e *eventBroadcasterImpl) Shutdown() { + e.Broadcaster.Shutdown() +} + +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { + // Make a copy before modification, because there could be multiple listeners. + // Events are safe to copy like this. + eventCopy := *event + event = &eventCopy + result, err := eventCorrelator.EventCorrelate(event) + if err != nil { + utilruntime.HandleError(err) + } + if result.Skip { + return + } + tries := 0 + for { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + break + } + tries++ + if tries >= maxTriesPerEvent { + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + break + } + // Randomize the first sleep so that various clients won't all be + // synced up if the master goes down. 
+ if tries == 1 { + time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64())) + } else { + time.Sleep(sleepDuration) + } + } +} + +// recordEvent attempts to write event to a sink. It returns true if the event +// was successfully recorded or discarded, false if it should be retried. +// If updateExistingEvent is false, it creates a new event, otherwise it updates +// existing event. +func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { + var newEvent *v1.Event + var err error + if updateExistingEvent { + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !updateExistingEvent || (updateExistingEvent && util.IsKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + // we need to update our event correlator with the server returned state to handle name/resourceversion + eventCorrelator.UpdateState(newEvent) + return true + } + + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. + switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return true + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return true + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err) + return false +} + +// StartLogging starts sending events received from this EventBroadcaster to the given logging function. +// The return value can be ignored or used to stop recording, if desired. +func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return e.StartEventWatcher( + func(e *v1.Event) { + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + }) +} + +// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. +// The return value can be ignored or used to stop recording, if desired. +func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface { + return e.StartEventWatcher( + func(e *v1.Event) { + klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value can be ignored or used to stop recording, if desired. 
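+// Events are delivered to eventHandler one at a time from a dedicated
+// goroutine that drains the broadcaster's watch channel.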
+func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := e.Watch() + go func() { + defer utilruntime.HandleCrash() + for watchEvent := range watcher.ResultChan() { + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) + } + }() + return watcher +} + +// NewRecorder returns an EventRecorder that records events with the given event source. +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}} +} + +type recorderImpl struct { + scheme *runtime.Scheme + source v1.EventSource + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, eventtype, reason, message string) { + ref, err := ref.GetReference(recorder.scheme, object) + if err != nil { + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + return + } + + if !util.ValidateEventType(eventtype) { + klog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, annotations, eventtype, reason, message) + event.Source = recorder.source + + // NOTE: events should be a non-blocking operation, but we also need to not + // put this in a goroutine, otherwise we'll race to write to a closed channel + // when we go to shut down this broadcaster. Just drop events if we get overloaded, + // and log an error if that happens (we've configured the broadcaster to drop + // outgoing events anyway). + if sent := recorder.ActionOrDrop(watch.Added, event); !sent { + klog.Errorf("unable to record event: too many queued events, dropped event %#v", event) + } +} + +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, nil, eventtype, reason, message) +} + +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + Annotations: annotations, + }, + InvolvedObject: *ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go new file mode 100644 index 0000000000..9374612f26 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -0,0 +1,511 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/golang/groupcache/lru" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + maxLruCacheEntries = 4096 + + // if we see the same event that varies only by message + // more than 10 times in a 10 minute period, aggregate the event + defaultAggregateMaxEvents = 10 + defaultAggregateIntervalInSeconds = 600 + + // by default, allow a source to send 25 events about an object + // but control the refill rate to 1 new event every 5 minutes + // this helps control the long-tail of events for things that are always + // unhealthy + defaultSpamBurst = 25 + defaultSpamQPS = 1. / 300. +) + +// getEventKey builds unique event key based on source, involvedObject, reason, message +func getEventKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + event.InvolvedObject.FieldPath, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + event.Message, + }, + "") +} + +// getSpamKey builds unique event key based on source, involvedObject +func getSpamKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + }, + "") +} + +// EventFilterFunc is a function that returns true if the event should be skipped +type EventFilterFunc func(event *v1.Event) bool + +// EventSourceObjectSpamFilter is responsible for throttling +// the amount of events a source and object can produce. +type EventSourceObjectSpamFilter struct { + sync.RWMutex + + // the cache that manages last synced state + cache *lru.Cache + + // burst is the amount of events we allow per source + object + burst int + + // qps is the refill rate of the token bucket in queries per second + qps float32 + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill. +func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter { + return &EventSourceObjectSpamFilter{ + cache: lru.New(lruCacheSize), + burst: burst, + qps: qps, + clock: clock, + } +} + +// spamRecord holds data used to perform spam filtering decisions. +type spamRecord struct { + // rateLimiter controls the rate of events about this object + rateLimiter flowcontrol.RateLimiter +} + +// Filter controls that a given source+object are not exceeding the allowed rate. 
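+// It returns true when the event should be dropped, i.e. when the token
+// bucket for the source+object key has no tokens available.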
+func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool {
+	var record spamRecord
+
+	// controls our cached information about this event (source+object)
+	eventKey := getSpamKey(event)
+
+	// do we have a record of similar events in our cache?
+	f.Lock()
+	defer f.Unlock()
+	value, found := f.cache.Get(eventKey)
+	if found {
+		record = value.(spamRecord)
+	}
+
+	// verify we have a rate limiter for this record
+	if record.rateLimiter == nil {
+		record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock)
+	}
+
+	// ensure we have available rate
+	filter := !record.rateLimiter.TryAccept()
+
+	// update the cache
+	f.cache.Add(eventKey, record)
+
+	return filter
+}
+
+// EventAggregatorKeyFunc is responsible for grouping events for aggregation
+// It returns a tuple of the following:
+// aggregateKey - key that identifies the aggregate group to bucket this event
+// localKey - key that makes this event unique within the aggregate group
+type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string)
+
+// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type,
+// event.Reason, event.ReportingController and event.ReportingInstance
+func EventAggregatorByReasonFunc(event *v1.Event) (string, string) {
+	return strings.Join([]string{
+		event.Source.Component,
+		event.Source.Host,
+		event.InvolvedObject.Kind,
+		event.InvolvedObject.Namespace,
+		event.InvolvedObject.Name,
+		string(event.InvolvedObject.UID),
+		event.InvolvedObject.APIVersion,
+		event.Type,
+		event.Reason,
+		event.ReportingController,
+		event.ReportingInstance,
+	},
+		""), event.Message
+}
+
+// EventAggregatorMessageFunc is responsible for producing an aggregation message
+type EventAggregatorMessageFunc func(event *v1.Event) string
+
+// EventAggregatorByReasonMessageFunc returns an aggregate message by prefixing the incoming message
+func EventAggregatorByReasonMessageFunc(event *v1.Event) string {
+	return "(combined from similar events): " + event.Message
+}
+
+// EventAggregator identifies similar events and aggregates them into a single event
+type EventAggregator struct {
+	sync.RWMutex
+
+	// The cache that manages aggregation state
+	cache *lru.Cache
+
+	// The function that groups events for aggregation
+	keyFunc EventAggregatorKeyFunc
+
+	// The function that generates a message for an aggregate event
+	messageFunc EventAggregatorMessageFunc
+
+	// The maximum number of events in the specified interval before aggregation occurs
+	maxEvents uint
+
+	// The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new
+	maxIntervalInSeconds uint
+
+	// clock is used to allow for testing over a time interval
+	clock clock.Clock
+}
+
+// NewEventAggregator returns a new instance of an EventAggregator
+func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc,
+	maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator {
+	return &EventAggregator{
+		cache:                lru.New(lruCacheSize),
+		keyFunc:              keyFunc,
+		messageFunc:          messageFunc,
+		maxEvents:            uint(maxEvents),
+		maxIntervalInSeconds: uint(maxIntervalInSeconds),
+		clock:                clock,
+	}
+}
+
+// aggregateRecord holds data used to perform aggregation decisions
+type aggregateRecord struct {
+	// we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate
+	// if the size of this set exceeds the max,
we know we need to aggregate + localKeys sets.String + // The last time at which the aggregate was recorded + lastTimestamp metav1.Time +} + +// EventAggregate checks if a similar event has been seen according to the +// aggregation configuration (max events, max interval, etc) and returns: +// +// - The (potentially modified) event that should be created +// - The cache key for the event, for correlation purposes. This will be set to +// the full key for normal events, and to the result of +// EventAggregatorMessageFunc for aggregate events. +func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) { + now := metav1.NewTime(e.clock.Now()) + var record aggregateRecord + // eventKey is the full cache key for this event + eventKey := getEventKey(newEvent) + // aggregateKey is for the aggregate event, if one is needed. + aggregateKey, localKey := e.keyFunc(newEvent) + + // Do we have a record of similar events in our cache? + e.Lock() + defer e.Unlock() + value, found := e.cache.Get(aggregateKey) + if found { + record = value.(aggregateRecord) + } + + // Is the previous record too old? If so, make a fresh one. Note: if we didn't + // find a similar record, its lastTimestamp will be the zero value, so we + // create a new one in that case. + maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second + interval := now.Time.Sub(record.lastTimestamp.Time) + if interval > maxInterval { + record = aggregateRecord{localKeys: sets.NewString()} + } + + // Write the new event into the aggregation record and put it on the cache + record.localKeys.Insert(localKey) + record.lastTimestamp = now + e.cache.Add(aggregateKey, record) + + // If we are not yet over the threshold for unique events, don't correlate them + if uint(record.localKeys.Len()) < e.maxEvents { + return newEvent, eventKey + } + + // do not grow our local key set any larger than max + record.localKeys.PopAny() + + // create a new aggregate event, and return the aggregateKey as the cache key + // (so that it can be overwritten.) + eventCopy := &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), + Namespace: newEvent.Namespace, + }, + Count: 1, + FirstTimestamp: now, + InvolvedObject: newEvent.InvolvedObject, + LastTimestamp: now, + Message: e.messageFunc(newEvent), + Type: newEvent.Type, + Reason: newEvent.Reason, + Source: newEvent.Source, + } + return eventCopy, aggregateKey +} + +// eventLog records data about when an event was observed +type eventLog struct { + // The number of times the event has occurred since first occurrence. + count uint + + // The time at which the event was first recorded. 
+ firstTimestamp metav1.Time + + // The unique name of the first occurrence of this event + name string + + // Resource version returned from previous interaction with server + resourceVersion string +} + +// eventLogger logs occurrences of an event +type eventLogger struct { + sync.RWMutex + cache *lru.Cache + clock clock.Clock +} + +// newEventLogger observes events and counts their frequencies +func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { + return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} +} + +// eventObserve records an event, or updates an existing one if key is a cache hit +func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) { + var ( + patch []byte + err error + ) + eventCopy := *newEvent + event := &eventCopy + + e.Lock() + defer e.Unlock() + + // Check if there is an existing event we should update + lastObservation := e.lastEventObservationFromCache(key) + + // If we found a result, prepare a patch + if lastObservation.count > 0 { + // update the event based on the last observation so patch will work as desired + event.Name = lastObservation.name + event.ResourceVersion = lastObservation.resourceVersion + event.FirstTimestamp = lastObservation.firstTimestamp + event.Count = int32(lastObservation.count) + 1 + + eventCopy2 := *event + eventCopy2.Count = 0 + eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) + eventCopy2.Message = "" + + newData, _ := json.Marshal(event) + oldData, _ := json.Marshal(eventCopy2) + patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event) + } + + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) + return event, patch, err +} + +// updateState updates its internal tracking information based on latest server state +func (e *eventLogger) updateState(event *v1.Event) { + key := getEventKey(event) + e.Lock() + defer e.Unlock() + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) +} + +// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock +func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { + value, ok := e.cache.Get(key) + if ok { + observationValue, ok := value.(eventLog) + if ok { + return observationValue + } + } + return eventLog{} +} + +// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all +// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur +// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication +// to ensure events that are observed multiple times are compacted into a single event with increasing counts. 
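EventAggregate is exported, so the ten-similar-events threshold described above can be shown in isolation. A hedged fragment (assuming the imports of the earlier spam-filter sketch; the event values are invented):

```go
fakeClock := clock.NewFakeClock(time.Now())
agg := record.NewEventAggregator(4096,
	record.EventAggregatorByReasonFunc,
	record.EventAggregatorByReasonMessageFunc,
	10,  // defaultAggregateMaxEvents
	600, // defaultAggregateIntervalInSeconds
	fakeClock)

base := v1.Event{
	InvolvedObject: v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "web-0"},
	Type:           v1.EventTypeWarning,
	Reason:         "FailedMount",
}
for i := 1; i <= 10; i++ {
	ev := base // copy; only the message varies between "similar" events
	ev.Message = fmt.Sprintf("mount attempt %d failed", i)
	out, _ := agg.EventAggregate(&ev)
	if i == 10 {
		// The tenth distinct message crosses maxEvents, so the returned
		// event is the aggregate with the prefixed message:
		// "(combined from similar events): mount attempt 10 failed"
		fmt.Println(out.Message)
	}
}
```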
+type EventCorrelator struct {
+	// the function to filter the event
+	filterFunc EventFilterFunc
+	// the object that performs event aggregation
+	aggregator *EventAggregator
+	// the object that observes events as they come through
+	logger *eventLogger
+}
+
+// EventCorrelateResult is the result of a Correlate
+type EventCorrelateResult struct {
+	// the event after correlation
+	Event *v1.Event
+	// if provided, perform a strategic patch when updating the record on the server
+	Patch []byte
+	// if true, do no further processing of the event
+	Skip bool
+}
+
+// NewEventCorrelator returns an EventCorrelator configured with default values.
+//
+// The EventCorrelator is responsible for event filtering, aggregating, and counting
+// prior to interacting with the API server to record the event.
+//
+// The default behavior is as follows:
+//   * Aggregation is performed if a similar event is recorded 10 times
+//     in a 10 minute rolling interval. A similar event is an event that varies only by
+//     the Event.Message field. Rather than recording the precise event, aggregation
+//     will create a new event whose message reports that it has combined events with
+//     the same reason.
+//   * Events are incrementally counted if the exact same event is encountered multiple
+//     times.
+//   * A source may burst 25 events about an object, but has a refill rate budget
+//     per object of 1 event every 5 minutes to control the long tail of spam.
+func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
+	cacheSize := maxLruCacheEntries
+	spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock)
+	return &EventCorrelator{
+		filterFunc: spamFilter.Filter,
+		aggregator: NewEventAggregator(
+			cacheSize,
+			EventAggregatorByReasonFunc,
+			EventAggregatorByReasonMessageFunc,
+			defaultAggregateMaxEvents,
+			defaultAggregateIntervalInSeconds,
+			clock),
+
+		logger: newEventLogger(cacheSize, clock),
+	}
+}
+
+// NewEventCorrelatorWithOptions returns an EventCorrelator built from the given
+// options, falling back to the defaults for any zero-value field.
+func NewEventCorrelatorWithOptions(options CorrelatorOptions) *EventCorrelator {
+	optionsWithDefaults := populateDefaults(options)
+	spamFilter := NewEventSourceObjectSpamFilter(optionsWithDefaults.LRUCacheSize,
+		optionsWithDefaults.BurstSize, optionsWithDefaults.QPS, optionsWithDefaults.Clock)
+	return &EventCorrelator{
+		filterFunc: spamFilter.Filter,
+		aggregator: NewEventAggregator(
+			optionsWithDefaults.LRUCacheSize,
+			optionsWithDefaults.KeyFunc,
+			optionsWithDefaults.MessageFunc,
+			optionsWithDefaults.MaxEvents,
+			optionsWithDefaults.MaxIntervalInSeconds,
+			optionsWithDefaults.Clock),
+		logger: newEventLogger(optionsWithDefaults.LRUCacheSize, optionsWithDefaults.Clock),
+	}
+}
+
+// populateDefaults populates the zero value options with defaults
+func populateDefaults(options CorrelatorOptions) CorrelatorOptions {
+	if options.LRUCacheSize == 0 {
+		options.LRUCacheSize = maxLruCacheEntries
+	}
+	if options.BurstSize == 0 {
+		options.BurstSize = defaultSpamBurst
+	}
+	if options.QPS == 0 {
+		options.QPS = defaultSpamQPS
+	}
+	if options.KeyFunc == nil {
+		options.KeyFunc = EventAggregatorByReasonFunc
+	}
+	if options.MessageFunc == nil {
+		options.MessageFunc = EventAggregatorByReasonMessageFunc
+	}
+	if options.MaxEvents == 0 {
+		options.MaxEvents = defaultAggregateMaxEvents
+	}
+	if options.MaxIntervalInSeconds == 0 {
+		options.MaxIntervalInSeconds = defaultAggregateIntervalInSeconds
+	}
+	if options.Clock == nil {
+		options.Clock = clock.RealClock{}
+	}
+	return options
+}
+
+// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events
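Before the EventCorrelate implementation below, a fragment sketching how a caller (such as the broadcaster's sink-writing loop) typically drives the correlator; the create-versus-patch decision mirrors the EventCorrelateResult fields, and the imports match the earlier sketches:

```go
correlator := record.NewEventCorrelator(clock.RealClock{})

ev := &v1.Event{
	InvolvedObject: v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "web-0"},
	Type:           v1.EventTypeNormal,
	Reason:         "Pulled",
	Message:        "Container image already present on machine",
	Count:          1,
}

for i := 0; i < 2; i++ {
	result, err := correlator.EventCorrelate(ev)
	if err != nil || result.Skip {
		continue // filtered as spam: drop the event entirely
	}
	if result.Patch != nil {
		// Duplicate of an earlier event: PATCH the existing Event object
		// on the server with the incremented count instead of creating one.
		fmt.Printf("patch: %s\n", result.Patch)
	} else {
		// First observation: POST a new Event object.
		fmt.Printf("create: %s (count %d)\n", result.Event.Reason, result.Event.Count)
	}
}
```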
+func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) { + if newEvent == nil { + return nil, fmt.Errorf("event is nil") + } + aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent) + observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey) + if c.filterFunc(observedEvent) { + return &EventCorrelateResult{Skip: true}, nil + } + return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err +} + +// UpdateState based on the latest observed state from server +func (c *EventCorrelator) UpdateState(event *v1.Event) { + c.logger.updateState(event) +} diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go new file mode 100644 index 0000000000..0b3f344a97 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -0,0 +1,66 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string + + IncludeObject bool +} + +func objectString(object runtime.Object, includeObject bool) string { + if !includeObject { + return "" + } + return fmt.Sprintf(" involvedObject{kind=%s,apiVersion=%s}", + object.GetObjectKind().GroupVersionKind().Kind, + object.GetObjectKind().GroupVersionKind().GroupVersion(), + ) +} + +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if f.Events != nil { + f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject)) + } +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject) + } +} + +func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + f.Eventf(object, eventtype, reason, messageFmt, args...) +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/client-go/tools/record/util/util.go b/vendor/k8s.io/client-go/tools/record/util/util.go new file mode 100644 index 0000000000..d1818a8d90 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/util/util.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"net/http"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+)
+
+// ValidateEventType checks that eventtype is an expected type of event
+func ValidateEventType(eventtype string) bool {
+	switch eventtype {
+	case v1.EventTypeNormal, v1.EventTypeWarning:
+		return true
+	}
+	return false
+}
+
+// IsKeyNotFoundError is a utility function that checks whether an error is a
+// NotFound (HTTP 404) status error.
+func IsKeyNotFoundError(err error) bool {
+	statusErr, _ := err.(*errors.StatusError)
+
+	if statusErr != nil && statusErr.Status().Code == http.StatusNotFound {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/k8s.io/component-base/metrics/OWNERS b/vendor/k8s.io/component-base/metrics/OWNERS
new file mode 100644
index 0000000000..6010d505f6
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- sig-instrumentation-approvers
+- logicalhan
+- RainbowMango
+reviewers:
+- sig-instrumentation-reviewers
+- YoyinZyc
+labels:
+- sig/instrumentation
diff --git a/vendor/k8s.io/component-base/metrics/collector.go b/vendor/k8s.io/component-base/metrics/collector.go
new file mode 100644
index 0000000000..61ad951e5b
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/collector.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics

+import (
+	"fmt"
+
+	"github.com/blang/semver"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// StableCollector extends the prometheus.Collector interface to allow customization of the
+// metric registration process; it is especially intended for custom-collector scenarios.
+type StableCollector interface {
+	prometheus.Collector
+
+	// DescribeWithStability sends the super-set of all possible metrics.Desc collected
+	// by this StableCollector to the provided channel.
+	DescribeWithStability(chan<- *Desc)
+
+	// CollectWithStability sends each collected metrics.Metric via the provided channel.
+	CollectWithStability(chan<- Metric)
+
+	// Create initializes all Descs and is intended to be called by the registry.
+	Create(version *semver.Version, self StableCollector) bool
+
+	// ClearState will clear all the states marked by Create.
+	ClearState()
+
+	// HiddenMetrics returns the fqNames of all hidden metrics.
+	HiddenMetrics() []string
+}
+
+// BaseStableCollector, which implements almost all of the methods defined by StableCollector,
+// is a convenient base for custom collectors.
+// It is recommended to embed BaseStableCollector when implementing custom collectors.
+type BaseStableCollector struct {
+	descriptors  map[string]*Desc // stores all descriptors by pair <fqName, Desc>; these are collected from DescribeWithStability().
+	registerable map[string]*Desc // stores registerable descriptors by pair <fqName, Desc>; a subset of descriptors.
+	hidden       map[string]*Desc // stores hidden descriptors by pair <fqName, Desc>; a subset of descriptors.
+	self         StableCollector
+}
+
+// DescribeWithStability sends all descriptors to the provided channel.
+// Every custom collector should override this method.
+func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) {
+	panic(fmt.Errorf("custom collector should over-write DescribeWithStability method"))
+}
+
+// Describe sends all descriptors to the provided channel.
+// It is intended to be called by the Prometheus registry.
+func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) {
+	for _, d := range bsc.registerable {
+		ch <- d.toPrometheusDesc()
+	}
+}
+
+// CollectWithStability sends all metrics to the provided channel.
+// Every custom collector should override this method.
+func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) {
+	panic(fmt.Errorf("custom collector should over-write CollectWithStability method"))
+}
+
+// Collect is called by the Prometheus registry when collecting metrics.
+func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) {
+	mch := make(chan Metric)
+
+	go func() {
+		bsc.self.CollectWithStability(mch)
+		close(mch)
+	}()
+
+	for m := range mch {
+		// a nil Metric usually means a hidden metric
+		if m == nil {
+			continue
+		}
+
+		ch <- prometheus.Metric(m)
+	}
+}
+
+func (bsc *BaseStableCollector) add(d *Desc) {
+	if len(d.fqName) == 0 {
+		panic("nameless metrics will be not allowed")
+	}
+
+	if bsc.descriptors == nil {
+		bsc.descriptors = make(map[string]*Desc)
+	}
+
+	if _, exist := bsc.descriptors[d.fqName]; exist {
+		panic(fmt.Sprintf("duplicate metrics (%s) will be not allowed", d.fqName))
+	}
+
+	bsc.descriptors[d.fqName] = d
+}
+
+// init is intended to be called by the registry.
+func (bsc *BaseStableCollector) init(self StableCollector) {
+	bsc.self = self
+
+	dch := make(chan *Desc)
+
+	// collect all possible descriptions from the custom side
+	go func() {
+		bsc.self.DescribeWithStability(dch)
+		close(dch)
+	}()
+
+	for d := range dch {
+		bsc.add(d)
+	}
+}
+
+func (bsc *BaseStableCollector) trackRegistrableDescriptor(d *Desc) {
+	if bsc.registerable == nil {
+		bsc.registerable = make(map[string]*Desc)
+	}
+
+	bsc.registerable[d.fqName] = d
+}
+
+func (bsc *BaseStableCollector) trackHiddenDescriptor(d *Desc) {
+	if bsc.hidden == nil {
+		bsc.hidden = make(map[string]*Desc)
+	}
+
+	bsc.hidden[d.fqName] = d
+}
+
+// Create is intended to be called by the registry.
+// Create returns true as long as at least one metric is not hidden;
+// otherwise it returns false, meaning the whole collector will be ignored by the registry.
+func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool {
+	bsc.init(self)
+
+	for _, d := range bsc.descriptors {
+		d.create(version)
+		if d.IsHidden() {
+			bsc.trackHiddenDescriptor(d)
+		} else {
+			bsc.trackRegistrableDescriptor(d)
+		}
+	}
+
+	if len(bsc.registerable) > 0 {
+		return true
+	}
+
+	return false
+}
+
+// ClearState will clear all the states marked by Create.
+// It is intended to be used when re-registering a hidden metric.
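To make the embedding contract concrete before the remaining BaseStableCollector plumbing, a hedged sketch of a custom collector: only the two *WithStability methods are supplied. NewLazyConstMetric, GaugeValue, and ALPHA come from elsewhere in this same vendored package (value.go and opts.go, not shown in this hunk), legacyregistry appears later in this diff, and the metric name, label, and value are invented:

```go
package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

// demoCollector reports a fixed pod count per node; embedding
// BaseStableCollector supplies Describe/Collect/Create/ClearState.
type demoCollector struct {
	metrics.BaseStableCollector
	desc *metrics.Desc
}

func newDemoCollector() *demoCollector {
	return &demoCollector{
		desc: metrics.NewDesc("demo_pod_count", "Number of pods per node (demo).",
			[]string{"node"}, nil, metrics.ALPHA, ""),
	}
}

// DescribeWithStability advertises every Desc this collector can emit.
func (c *demoCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
	ch <- c.desc
}

// CollectWithStability emits the current values; hidden Descs yield nil
// metrics, which the registry's Collect loop filters out.
func (c *demoCollector) CollectWithStability(ch chan<- metrics.Metric) {
	ch <- metrics.NewLazyConstMetric(c.desc, metrics.GaugeValue, 3, "node-1")
}

func main() {
	legacyregistry.CustomMustRegister(newDemoCollector())
}
```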
+func (bsc *BaseStableCollector) ClearState() { + for _, d := range bsc.descriptors { + d.ClearState() + } + + bsc.descriptors = nil + bsc.registerable = nil + bsc.hidden = nil + bsc.self = nil +} + +// HiddenMetrics tells the list of hidden metrics with fqName. +func (bsc *BaseStableCollector) HiddenMetrics() (fqNames []string) { + for i := range bsc.hidden { + fqNames = append(fqNames, bsc.hidden[i].fqName) + } + return +} + +// Check if our BaseStableCollector implements necessary interface +var _ StableCollector = &BaseStableCollector{} diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go new file mode 100644 index 0000000000..addf680c87 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/counter.go @@ -0,0 +1,233 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// Counter is our internal representation for our wrapping struct around prometheus +// counters. Counter implements both kubeCollector and CounterMetric. +type Counter struct { + CounterMetric + *CounterOpts + lazyMetric + selfCollector +} + +// The implementation of the Metric interface is expected by testutil.GetCounterMetricValue. +var _ Metric = &Counter{} + +// NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewCounter(opts *CounterOpts) *Counter { + opts.StabilityLevel.setDefaults() + + kc := &Counter{ + CounterOpts: opts, + lazyMetric: lazyMetric{}, + } + kc.setPrometheusCounter(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +func (c *Counter) Desc() *prometheus.Desc { + return c.metric.Desc() +} + +func (c *Counter) Write(to *dto.Metric) error { + return c.metric.Write(to) +} + +// Reset resets the underlying prometheus Counter to start counting from 0 again +func (c *Counter) Reset() { + if !c.IsCreated() { + return + } + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// setPrometheusCounter sets the underlying CounterMetric object, i.e. the thing that does the measurement. +func (c *Counter) setPrometheusCounter(counter prometheus.Counter) { + c.CounterMetric = counter + c.initSelfCollection(counter) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (c *Counter) DeprecatedVersion() *semver.Version { + return parseSemver(c.CounterOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Counter. Until this method is called +// the underlying counter is a no-op. +func (c *Counter) initializeMetric() { + c.CounterOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus counter. 
+ c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Counter. Until this method +// is called the underlying counter is a no-op. +func (c *Counter) initializeDeprecatedMetric() { + c.CounterOpts.markDeprecated() + c.initializeMetric() +} + +// WithContext allows the normal Counter metric to pass in context. The context is no-op now. +func (c *Counter) WithContext(ctx context.Context) CounterMetric { + return c.CounterMetric +} + +// CounterVec is the internal representation of our wrapping struct around prometheus +// counterVecs. CounterVec implements both kubeCollector and CounterVecMetric. +type CounterVec struct { + *prometheus.CounterVec + *CounterOpts + lazyMetric + originalLabels []string +} + +// NewCounterVec returns an object which satisfies the kubeCollector and CounterVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + cv := &CounterVec{ + CounterVec: noopCounterVec, + CounterOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + cv.lazyInit(cv, fqName) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *CounterVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.CounterOpts.DeprecatedVersion) + +} + +// initializeMetric invocation creates the actual underlying CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeMetric() { + v.CounterOpts.annotateStabilityLevel() + v.CounterVec = prometheus.NewCounterVec(v.CounterOpts.toPromCounterOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeDeprecatedMetric() { + v.CounterOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/counter.go#L179-L197 + +// WithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created IFF the counterVec +// has been registered to a metrics registry. +func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.CounterVec.WithLabelValues(lvs...) 
+} + +// With returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created IFF the counterVec has +// been registered to a metrics registry. +func (v *CounterVec) With(labels map[string]string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.CounterVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *CounterVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.CounterVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *CounterVec) Reset() { + if !v.IsCreated() { + return + } + + v.CounterVec.Reset() +} + +// WithContext returns wrapped CounterVec with context +func (v *CounterVec) WithContext(ctx context.Context) *CounterVecWithContext { + return &CounterVecWithContext{ + ctx: ctx, + CounterVec: *v, + } +} + +// CounterVecWithContext is the wrapper of CounterVec with context. +type CounterVecWithContext struct { + CounterVec + ctx context.Context +} + +// WithLabelValues is the wrapper of CounterVec.WithLabelValues. +func (vc *CounterVecWithContext) WithLabelValues(lvs ...string) CounterMetric { + return vc.CounterVec.WithLabelValues(lvs...) +} + +// With is the wrapper of CounterVec.With. +func (vc *CounterVecWithContext) With(labels map[string]string) CounterMetric { + return vc.CounterVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/desc.go b/vendor/k8s.io/component-base/metrics/desc.go new file mode 100644 index 0000000000..e53e3ee189 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/desc.go @@ -0,0 +1,225 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/klog/v2" +) + +// Desc is a prometheus.Desc extension. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabels is the label names. Their label values are variable. + constLabels Labels + // variableLabels contains names of labels for which the metric + // maintains variable values. 
+ variableLabels []string + + // promDesc is the descriptor used by every Prometheus Metric. + promDesc *prometheus.Desc + annotatedHelp string + + // stabilityLevel represents the API guarantees for a given defined metric. + stabilityLevel StabilityLevel + // deprecatedVersion represents in which version this metric be deprecated. + deprecatedVersion string + + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + deprecateOnce sync.Once + hideOnce sync.Once + annotateOnce sync.Once +} + +// NewDesc extends prometheus.NewDesc with stability support. +// +// The stabilityLevel should be valid stability label, such as "metrics.ALPHA" +// and "metrics.STABLE"(Maybe "metrics.BETA" in future). Default value "metrics.ALPHA" +// will be used in case of empty or invalid stability label. +// +// The deprecatedVersion represents in which version this Metric be deprecated. +// The deprecation policy outlined by the control plane metrics stability KEP. +func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels, + stabilityLevel StabilityLevel, deprecatedVersion string) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + annotatedHelp: help, + variableLabels: variableLabels, + constLabels: constLabels, + stabilityLevel: stabilityLevel, + deprecatedVersion: deprecatedVersion, + } + d.stabilityLevel.setDefaults() + + return d +} + +// String formats the Desc as a string. +// The stability metadata maybe annotated in 'HELP' section if called after registry, +// otherwise not. +// e.g. "Desc{fqName: "normal_stable_descriptor", help: "[STABLE] this is a stable descriptor", constLabels: {}, variableLabels: []}" +func (d *Desc) String() string { + if d.isCreated { + return d.promDesc.String() + } + + return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String() +} + +// toPrometheusDesc transform self to prometheus.Desc +func (d *Desc) toPrometheusDesc() *prometheus.Desc { + return d.promDesc +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (d *Desc) DeprecatedVersion() *semver.Version { + return parseSemver(d.deprecatedVersion) + +} + +func (d *Desc) determineDeprecationStatus(version semver.Version) { + selfVersion := d.DeprecatedVersion() + if selfVersion == nil { + return + } + d.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + d.isDeprecated = true + } + if ShouldShowHidden() { + klog.Warningf("Hidden metrics(%s) have been manually overridden, showing this very deprecated metric.", d.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName) + d.isHidden = true + } + }) +} + +// IsHidden returns if metric will be hidden +func (d *Desc) IsHidden() bool { + return d.isHidden +} + +// IsDeprecated returns if metric has been deprecated +func (d *Desc) IsDeprecated() bool { + return d.isDeprecated +} + +// IsCreated returns if metric has been created. +func (d *Desc) IsCreated() bool { + d.createLock.RLock() + defer d.createLock.RUnlock() + + return d.isCreated +} + +// create forces the initialization of Desc which has been deferred until +// the point at which this method is invoked. 
This method will determine whether +// the Desc is deprecated or hidden, no-opting if the Desc should be considered +// hidden. Furthermore, this function no-opts and returns true if Desc is already +// created. +func (d *Desc) create(version *semver.Version) bool { + if version != nil { + d.determineDeprecationStatus(*version) + } + + // let's not create if this metric is slated to be hidden + if d.IsHidden() { + return false + } + d.createOnce.Do(func() { + d.createLock.Lock() + defer d.createLock.Unlock() + + d.isCreated = true + if d.IsDeprecated() { + d.initializeDeprecatedDesc() + } else { + d.initialize() + } + }) + return d.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (d *Desc) ClearState() { + d.isDeprecated = false + d.isHidden = false + d.isCreated = false + + d.markDeprecationOnce = *new(sync.Once) + d.createOnce = *new(sync.Once) + d.deprecateOnce = *new(sync.Once) + d.hideOnce = *new(sync.Once) + d.annotateOnce = *new(sync.Once) + + d.annotatedHelp = d.help + d.promDesc = nil +} + +func (d *Desc) markDeprecated() { + d.deprecateOnce.Do(func() { + d.annotatedHelp = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.annotatedHelp) + }) +} + +func (d *Desc) annotateStabilityLevel() { + d.annotateOnce.Do(func() { + d.annotatedHelp = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.annotatedHelp) + }) +} + +func (d *Desc) initialize() { + d.annotateStabilityLevel() + + // this actually creates the underlying prometheus desc. + d.promDesc = prometheus.NewDesc(d.fqName, d.annotatedHelp, d.variableLabels, prometheus.Labels(d.constLabels)) +} + +func (d *Desc) initializeDeprecatedDesc() { + d.markDeprecated() + d.initialize() +} + +// GetRawDesc will returns a new *Desc with original parameters provided to NewDesc(). +// +// It will be useful in testing scenario that the same Desc be registered to different registry. +// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created) +// 2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B') +func (d *Desc) GetRawDesc() *Desc { + return NewDesc(d.fqName, d.help, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion) +} diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go new file mode 100644 index 0000000000..a8d9c20101 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/gauge.go @@ -0,0 +1,236 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/component-base/version" +) + +// Gauge is our internal representation for our wrapping struct around prometheus +// gauges. kubeGauge implements both kubeCollector and KubeGauge. 
+type Gauge struct { + GaugeMetric + *GaugeOpts + lazyMetric + selfCollector +} + +// NewGauge returns an object which satisfies the kubeCollector and KubeGauge interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGauge(opts *GaugeOpts) *Gauge { + opts.StabilityLevel.setDefaults() + + kc := &Gauge{ + GaugeOpts: opts, + lazyMetric: lazyMetric{}, + } + kc.setPrometheusGauge(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +// setPrometheusGauge sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) { + g.GaugeMetric = gauge + g.initSelfCollection(gauge) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (g *Gauge) DeprecatedVersion() *semver.Version { + return parseSemver(g.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Gauge. Until this method is called +// the underlying gauge is a no-op. +func (g *Gauge) initializeMetric() { + g.GaugeOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method +// is called the underlying gauge is a no-op. +func (g *Gauge) initializeDeprecatedMetric() { + g.GaugeOpts.markDeprecated() + g.initializeMetric() +} + +// WithContext allows the normal Gauge metric to pass in context. The context is no-op now. +func (g *Gauge) WithContext(ctx context.Context) GaugeMetric { + return g.GaugeMetric +} + +// GaugeVec is the internal representation of our wrapping struct around prometheus +// gaugeVecs. kubeGaugeVec implements both kubeCollector and KubeGaugeVec. +type GaugeVec struct { + *prometheus.GaugeVec + *GaugeOpts + lazyMetric + originalLabels []string +} + +// NewGaugeVec returns an object which satisfies the kubeCollector and KubeGaugeVec interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + cv := &GaugeVec{ + GaugeVec: noopGaugeVec, + GaugeOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + cv.lazyInit(cv, fqName) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *GaugeVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeMetric() { + v.GaugeOpts.annotateStabilityLevel() + v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. 
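A short usage sketch (metric names invented) of the lazy-instantiation contract shared by Gauge and GaugeVec: the constructors return immediately usable no-ops, and real measurement starts only after registration, here via the vendored legacyregistry global registry shown later in this diff:

```go
inflight := metrics.NewGauge(&metrics.GaugeOpts{
	Name:           "demo_inflight_requests",
	Help:           "In-flight requests (demo).",
	StabilityLevel: metrics.ALPHA,
})

perVerb := metrics.NewGaugeVec(&metrics.GaugeOpts{
	Name: "demo_inflight_requests_per_verb",
	Help: "In-flight requests by verb (demo).",
}, []string{"verb"})

inflight.Set(1)                       // no-op: not registered yet
legacyregistry.MustRegister(inflight) // instantiates the real gauge
legacyregistry.MustRegister(perVerb)

inflight.Inc()
perVerb.WithLabelValues("GET").Inc()
```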
+func (v *GaugeVec) initializeDeprecatedMetric() { + v.GaugeOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 + +// WithLabelValues returns the GaugeMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec +// has been registered to a metrics registry. +func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { + if !v.IsCreated() { + return noop // return no-op gauge + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.GaugeVec.WithLabelValues(lvs...) +} + +// With returns the GaugeMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has +// been registered to a metrics registry. +func (v *GaugeVec) With(labels map[string]string) GaugeMetric { + if !v.IsCreated() { + return noop // return no-op gauge + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.GaugeVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *GaugeVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.GaugeVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *GaugeVec) Reset() { + if !v.IsCreated() { + return + } + + v.GaugeVec.Reset() +} + +func newGaugeFunc(opts GaugeOpts, function func() float64, v semver.Version) GaugeFunc { + g := NewGauge(&opts) + + if !g.Create(&v) { + return nil + } + + return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function) +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. 
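A fragment sketching the constructor documented above (metric name invented; imports as in the earlier sketches, plus "runtime"). The callback runs at scrape time, so it must be safe for concurrent calls; registration goes through RawMustRegister here on the assumption that the returned GaugeFunc is a plain Prometheus collector rather than a lazily registered wrapper:

```go
goroutines := metrics.NewGaugeFunc(metrics.GaugeOpts{
	Name:           "demo_goroutines",
	Help:           "Current number of goroutines (demo).",
	StabilityLevel: metrics.ALPHA,
}, func() float64 {
	// Called on every scrape; runtime.NumGoroutine is concurrency-safe.
	return float64(runtime.NumGoroutine())
})
// NewGaugeFunc has already applied the stability framework, so the
// raw registration path is used for the resulting collector.
legacyregistry.RawMustRegister(goroutines)
```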
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+	v := parseVersion(version.Get())
+
+	return newGaugeFunc(opts, function, v)
+}
+
+// WithContext returns wrapped GaugeVec with context
+func (v *GaugeVec) WithContext(ctx context.Context) *GaugeVecWithContext {
+	return &GaugeVecWithContext{
+		ctx:      ctx,
+		GaugeVec: *v,
+	}
+}
+
+// GaugeVecWithContext is the wrapper of GaugeVec with context.
+type GaugeVecWithContext struct {
+	GaugeVec
+	ctx context.Context
+}
+
+// WithLabelValues is the wrapper of GaugeVec.WithLabelValues.
+func (vc *GaugeVecWithContext) WithLabelValues(lvs ...string) GaugeMetric {
+	return vc.GaugeVec.WithLabelValues(lvs...)
+}
+
+// With is the wrapper of GaugeVec.With.
+func (vc *GaugeVecWithContext) With(labels map[string]string) GaugeMetric {
+	return vc.GaugeVec.With(labels)
+}
diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go
new file mode 100644
index 0000000000..9dd75388b3
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/histogram.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"github.com/blang/semver"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// DefBuckets is a wrapper for prometheus.DefBuckets
+var DefBuckets = prometheus.DefBuckets
+
+// LinearBuckets is a wrapper for prometheus.LinearBuckets.
+func LinearBuckets(start, width float64, count int) []float64 {
+	return prometheus.LinearBuckets(start, width, count)
+}
+
+// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	return prometheus.ExponentialBuckets(start, factor, count)
+}
+
+// Histogram is our internal representation for our wrapping struct around prometheus
+// histograms. Histogram implements both kubeCollector and ObserverMetric.
+type Histogram struct {
+	ObserverMetric
+	*HistogramOpts
+	lazyMetric
+	selfCollector
+}
+
+// NewHistogram returns an object which is Histogram-like. However, nothing
+// will be measured until the histogram is registered somewhere.
+func NewHistogram(opts *HistogramOpts) *Histogram {
+	opts.StabilityLevel.setDefaults()
+
+	h := &Histogram{
+		HistogramOpts: opts,
+		lazyMetric:    lazyMetric{},
+	}
+	h.setPrometheusHistogram(noopMetric{})
+	h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return h
+}
+
+// setPrometheusHistogram sets the underlying Histogram object, i.e. the thing that does the measurement.
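Before the implementation continues, a fragment showing typical Histogram construction with the bucket helpers above (name and bucket layout invented; imports as in the earlier sketches):

```go
latency := metrics.NewHistogram(&metrics.HistogramOpts{
	Name: "demo_request_duration_seconds",
	Help: "Request latency in seconds (demo).",
	// 12 buckets: 1ms, 2ms, 4ms, ... doubling up to ~2s.
	Buckets:        metrics.ExponentialBuckets(0.001, 2, 12),
	StabilityLevel: metrics.ALPHA,
})
legacyregistry.MustRegister(latency)
latency.Observe(0.042)
```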
+func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) {
+	h.ObserverMetric = histogram
+	h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *Histogram) DeprecatedVersion() *semver.Version {
+	return parseSemver(h.HistogramOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Histogram object instantiation
+// and stores a reference to it
+func (h *Histogram) initializeMetric() {
+	h.HistogramOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus histogram.
+	h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *Histogram) initializeDeprecatedMetric() {
+	h.HistogramOpts.markDeprecated()
+	h.initializeMetric()
+}
+
+// WithContext allows the normal Histogram metric to pass in context. The context is no-op now.
+func (h *Histogram) WithContext(ctx context.Context) ObserverMetric {
+	return h.ObserverMetric
+}
+
+// HistogramVec is the internal representation of our wrapping struct around prometheus
+// histogramVecs.
+type HistogramVec struct {
+	*prometheus.HistogramVec
+	*HistogramOpts
+	lazyMetric
+	originalLabels []string
+}
+
+// NewHistogramVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.HistogramVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily instantiated.
+func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+	allowListLock.RLock()
+	if allowList, ok := labelValueAllowLists[fqName]; ok {
+		opts.LabelValueAllowLists = allowList
+	}
+	allowListLock.RUnlock()
+
+	v := &HistogramVec{
+		HistogramVec:   noopHistogramVec,
+		HistogramOpts:  opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *HistogramVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.HistogramOpts.DeprecatedVersion)
+}
+
+func (v *HistogramVec) initializeMetric() {
+	v.HistogramOpts.annotateStabilityLevel()
+	v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels)
+}
+
+func (v *HistogramVec) initializeDeprecatedMetric() {
+	v.HistogramOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus behavior actually results in the creation of a new metric
+// if a metric with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470
+
+// WithLabelValues returns the ObserverMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec
+// has been registered to a metrics registry.
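The IFF-registered behavior documented above, in a short fragment (names invented): observations made before registration are silently dropped by the no-op vector, and no child metric leaks into memory:

```go
reqs := metrics.NewHistogramVec(&metrics.HistogramOpts{
	Name:    "demo_requests_seconds",
	Help:    "Request duration by verb (demo).",
	Buckets: metrics.DefBuckets,
}, []string{"verb"})

// Before registration the vector is a no-op: this observation is dropped.
reqs.WithLabelValues("GET").Observe(0.01)

legacyregistry.MustRegister(reqs)
reqs.WithLabelValues("GET").Observe(0.01) // now actually recorded
```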
+func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.HistogramVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has +// been registered to a metrics registry. +func (v *HistogramVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.HistogramVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *HistogramVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.HistogramVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *HistogramVec) Reset() { + if !v.IsCreated() { + return + } + + v.HistogramVec.Reset() +} + +// WithContext returns wrapped HistogramVec with context +func (v *HistogramVec) WithContext(ctx context.Context) *HistogramVecWithContext { + return &HistogramVecWithContext{ + ctx: ctx, + HistogramVec: *v, + } +} + +// HistogramVecWithContext is the wrapper of HistogramVec with context. +type HistogramVecWithContext struct { + HistogramVec + ctx context.Context +} + +// WithLabelValues is the wrapper of HistogramVec.WithLabelValues. +func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { + return vc.HistogramVec.WithLabelValues(lvs...) +} + +// With is the wrapper of HistogramVec.With. +func (vc *HistogramVecWithContext) With(labels map[string]string) ObserverMetric { + return vc.HistogramVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/http.go b/vendor/k8s.io/component-base/metrics/http.go new file mode 100644 index 0000000000..3394a8f711 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/http.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "io" + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. 
+ HTTPErrorOnError promhttp.HandlerErrorHandling = iota + + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts promhttp.HandlerOpts + +func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts { + return promhttp.HandlerOpts(*ho) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler { + return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) +} + +// HandlerWithReset return an http.Handler with Reset +func HandlerWithReset(reg KubeRegistry, opts HandlerOpts) http.Handler { + defaultHandler := promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodDelete { + reg.Reset() + io.WriteString(w, "metrics reset\n") + return + } + defaultHandler.ServeHTTP(w, r) + }) +} diff --git a/vendor/k8s.io/component-base/metrics/labels.go b/vendor/k8s.io/component-base/metrics/labels.go new file mode 100644 index 0000000000..11af3ae424 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Labels represents a collection of label name -> value mappings. +type Labels prometheus.Labels diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go new file mode 100644 index 0000000000..2605103c10 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package legacyregistry + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "k8s.io/component-base/metrics" +) + +var ( + defaultRegistry = metrics.NewKubeRegistry() + // DefaultGatherer exposes the global registry gatherer + DefaultGatherer metrics.Gatherer = defaultRegistry + // Reset calls reset on the global registry + Reset = defaultRegistry.Reset + // MustRegister registers registerable metrics but uses the global registry. + MustRegister = defaultRegistry.MustRegister + // RawMustRegister registers prometheus collectors but uses the global registry, this + // bypasses the metric stability framework + // + // Deprecated + RawMustRegister = defaultRegistry.RawMustRegister + + // Register registers a collectable metric but uses the global registry + Register = defaultRegistry.Register +) + +func init() { + RawMustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) + RawMustRegister(prometheus.NewGoCollector()) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead. +func Handler() http.Handler { + return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{})) +} + +// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes +// registry reset if the http method is DELETE. +func HandlerWithReset() http.Handler { + return promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, + metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{})) +} + +// CustomRegister registers a custom collector but uses the global registry. +func CustomRegister(c metrics.StableCollector) error { + err := defaultRegistry.CustomRegister(c) + + //TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13) + _ = prometheus.Register(c) + + return err +} + +// CustomMustRegister registers custom collectors but uses the global registry. +func CustomMustRegister(cs ...metrics.StableCollector) { + defaultRegistry.CustomMustRegister(cs...) + + for _, c := range cs { + prometheus.MustRegister(c) + } +} diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go new file mode 100644 index 0000000000..2f7b880d86 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -0,0 +1,243 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + "k8s.io/klog/v2" +) + +/* +kubeCollector extends the prometheus.Collector interface to allow customization of the metric +registration process. Defer metric initialization until Create() is called, which then +delegates to the underlying metric's initializeMetric or initializeDeprecatedMetric +method call depending on whether the metric is deprecated or not. +*/ +type kubeCollector interface { + Collector + lazyKubeMetric + DeprecatedVersion() *semver.Version + // Each collector metric should provide an initialization function + // for both deprecated and non-deprecated variants of a metric. This + // is necessary since metric instantiation will be deferred + // until the metric is actually registered somewhere. + initializeMetric() + initializeDeprecatedMetric() +} + +/* +lazyKubeMetric defines our metric registration interface. lazyKubeMetric objects are expected +to lazily instantiate metrics (i.e defer metric instantiation until when +the Create() function is explicitly called). +*/ +type lazyKubeMetric interface { + Create(*semver.Version) bool + IsCreated() bool + IsHidden() bool + IsDeprecated() bool +} + +/* +lazyMetric implements lazyKubeMetric. A lazy metric is lazy because it waits until metric +registration time before instantiation. Add it as an anonymous field to a struct that +implements kubeCollector to get deferred registration behavior. You must call lazyInit +with the kubeCollector itself as an argument. +*/ +type lazyMetric struct { + fqName string + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + self kubeCollector +} + +func (r *lazyMetric) IsCreated() bool { + r.createLock.RLock() + defer r.createLock.RUnlock() + return r.isCreated +} + +// lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed +// to allow lazy initialization for. It should be invoked in the factory function which creates new +// kubeCollector type objects. +func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) { + r.fqName = fqName + r.self = self +} + +// preprocessMetric figures out whether the lazy metric should be hidden or not. +// This method takes a Version argument which should be the version of the binary in which +// this code is currently being executed. A metric can be hidden under two conditions: +// 1. if the metric is deprecated and is outside the grace period (i.e. has been +// deprecated for more than one release +// 2. if the metric is manually disabled via a CLI flag. +// +// Disclaimer: disabling a metric via a CLI flag has higher precedence than +// deprecation and will override show-hidden-metrics for the explicitly +// disabled metric. 
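+//
+// As a worked example (hypothetical timeline): a metric declared with
+// DeprecatedVersion "1.17.0" is still exposed by a 1.17 binary, with its help
+// text prefixed "(Deprecated since 1.17.0)", and is hidden by a 1.18 binary
+// unless that binary is started with --show-hidden-metrics-for-version=1.17.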
+func (r *lazyMetric) preprocessMetric(version semver.Version) { + disabledMetricsLock.RLock() + defer disabledMetricsLock.RUnlock() + // disabling metrics is higher in precedence than showing hidden metrics + if _, ok := disabledMetrics[r.fqName]; ok { + r.isHidden = true + return + } + selfVersion := r.self.DeprecatedVersion() + if selfVersion == nil { + return + } + r.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + r.isDeprecated = true + } + + if ShouldShowHidden() { + klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", r.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric has been deprecated for more than one release, hiding.") + r.isHidden = true + } + }) +} + +func (r *lazyMetric) IsHidden() bool { + return r.isHidden +} + +func (r *lazyMetric) IsDeprecated() bool { + return r.isDeprecated +} + +// Create forces the initialization of metric which has been deferred until +// the point at which this method is invoked. This method will determine whether +// the metric is deprecated or hidden, no-opting if the metric should be considered +// hidden. Furthermore, this function no-opts and returns true if metric is already +// created. +func (r *lazyMetric) Create(version *semver.Version) bool { + if version != nil { + r.preprocessMetric(*version) + } + // let's not create if this metric is slated to be hidden + if r.IsHidden() { + return false + } + r.createOnce.Do(func() { + r.createLock.Lock() + defer r.createLock.Unlock() + r.isCreated = true + if r.IsDeprecated() { + r.self.initializeDeprecatedMetric() + } else { + r.self.initializeMetric() + } + }) + return r.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (r *lazyMetric) ClearState() { + r.createLock.Lock() + defer r.createLock.Unlock() + + r.isDeprecated = false + r.isHidden = false + r.isCreated = false + r.markDeprecationOnce = *(new(sync.Once)) + r.createOnce = *(new(sync.Once)) +} + +// FQName returns the fully-qualified metric name of the collector. +func (r *lazyMetric) FQName() string { + return r.fqName +} + +/* +This code is directly lifted from the prometheus codebase. It's a convenience struct which +allows you satisfy the Collector interface automatically if you already satisfy the Metric interface. 
+ +For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/collector.go#L98-L120 +*/ +type selfCollector struct { + metric prometheus.Metric +} + +func (c *selfCollector) initSelfCollection(m prometheus.Metric) { + c.metric = m +} + +func (c *selfCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.metric.Desc() +} + +func (c *selfCollector) Collect(ch chan<- prometheus.Metric) { + ch <- c.metric +} + +// no-op vecs for convenience +var noopCounterVec = &prometheus.CounterVec{} +var noopHistogramVec = &prometheus.HistogramVec{} +var noopGaugeVec = &prometheus.GaugeVec{} +var noopObserverVec = &noopObserverVector{} + +// just use a convenience struct for all the no-ops +var noop = &noopMetric{} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Add(float64) {} +func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} +func (noopMetric) Sub(float64) {} +func (noopMetric) Observe(float64) {} +func (noopMetric) SetToCurrentTime() {} +func (noopMetric) Desc() *prometheus.Desc { return nil } +func (noopMetric) Write(*dto.Metric) error { return nil } +func (noopMetric) Describe(chan<- *prometheus.Desc) {} +func (noopMetric) Collect(chan<- prometheus.Metric) {} + +type noopObserverVector struct{} + +func (noopObserverVector) GetMetricWith(prometheus.Labels) (prometheus.Observer, error) { + return noop, nil +} +func (noopObserverVector) GetMetricWithLabelValues(...string) (prometheus.Observer, error) { + return noop, nil +} +func (noopObserverVector) With(prometheus.Labels) prometheus.Observer { return noop } +func (noopObserverVector) WithLabelValues(...string) prometheus.Observer { return noop } +func (noopObserverVector) CurryWith(prometheus.Labels) (prometheus.ObserverVec, error) { + return noopObserverVec, nil +} +func (noopObserverVector) MustCurryWith(prometheus.Labels) prometheus.ObserverVec { + return noopObserverVec +} +func (noopObserverVector) Describe(chan<- *prometheus.Desc) {} +func (noopObserverVector) Collect(chan<- prometheus.Metric) {} diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go new file mode 100644 index 0000000000..91a76ba7e5 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/options.go @@ -0,0 +1,125 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "regexp" + + "github.com/blang/semver" + "github.com/spf13/pflag" + + "k8s.io/component-base/version" +) + +// Options has all parameters needed for exposing metrics from components +type Options struct { + ShowHiddenMetricsForVersion string + DisabledMetrics []string + AllowListMapping map[string]string +} + +// NewOptions returns default metrics options +func NewOptions() *Options { + return &Options{} +} + +// Validate validates metrics flags options. 
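+//
+// A minimal sketch (all values hypothetical), assuming a binary built at 1.17:
+//
+//	o := &Options{
+//		ShowHiddenMetricsForVersion: "1.16", // valid: only the previous minor release
+//		AllowListMapping:            map[string]string{"metric1,label1": "v1,v2"},
+//	}
+//	errs := o.Validate() // nil; "1.15" or a malformed allow-list key would fail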
+func (o *Options) Validate() []error {
+	var errs []error
+	err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion)
+	if err != nil {
+		errs = append(errs, err)
+	}
+
+	if err := validateAllowMetricLabel(o.AllowListMapping); err != nil {
+		errs = append(errs, err)
+	}
+
+	if len(errs) == 0 {
+		return nil
+	}
+	return errs
+}
+
+// AddFlags adds flags for exposing component metrics.
+func (o *Options) AddFlags(fs *pflag.FlagSet) {
+	if o == nil {
+		return
+	}
+	fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion,
+		"The previous version for which you want to show hidden metrics. "+
+			"Only the previous minor version is meaningful, other values will not be allowed. "+
+			"The format is <major>.<minor>, e.g.: '1.16'. "+
+			"The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, "+
+			"rather than being surprised when they are permanently removed in the release after that.")
+	fs.StringSliceVar(&o.DisabledMetrics,
+		"disabled-metrics",
+		o.DisabledMetrics,
+		"This flag provides an escape hatch for misbehaving metrics. "+
+			"You must provide the fully qualified metric name in order to disable it. "+
+			"Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.")
+	fs.StringToStringVar(&o.AllowListMapping, "allow-metric-labels", o.AllowListMapping,
+		"The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. "+
+			"The value's format is <allowed_value>,<allowed_value>..."+
+			"e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.")
+}
+
+// Apply applies parameters into the global configuration of metrics.
+func (o *Options) Apply() {
+	if o == nil {
+		return
+	}
+	if len(o.ShowHiddenMetricsForVersion) > 0 {
+		SetShowHidden()
+	}
+	// set disabled metrics
+	for _, metricName := range o.DisabledMetrics {
+		SetDisabledMetric(metricName)
+	}
+	if o.AllowListMapping != nil {
+		SetLabelAllowListFromCLI(o.AllowListMapping)
+	}
+}
+
+func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error {
+	if targetVersionStr == "" {
+		return nil
+	}
+
+	validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1)
+	if targetVersionStr != validVersionStr {
+		return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. Only the previous minor version is allowed", validVersionStr)
+	}
+
+	return nil
+}
+
+func validateAllowMetricLabel(allowListMapping map[string]string) error {
+	if allowListMapping == nil {
+		return nil
+	}
+	metricNameRegex := `[a-zA-Z_:][a-zA-Z0-9_:]*`
+	labelRegex := `[a-zA-Z_][a-zA-Z0-9_]*`
+	for k := range allowListMapping {
+		reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex)
+		if reg.FindString(k) != k {
+			return fmt.Errorf("--allow-metric-labels must have a list of key-value pairs with format `metricName,labelName=labelValue,labelValue,...`")
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go
new file mode 100644
index 0000000000..04203b74e0
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/opts.go
@@ -0,0 +1,301 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + labelValueAllowLists = map[string]*MetricLabelAllowList{} + allowListLock sync.RWMutex +) + +// KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure +// is purposefully not embedded here because that would change struct initialization +// in the manner which people are currently accustomed. +// +// Name must be set to a non-empty string. DeprecatedVersion is defined only +// if the metric for which this options applies is, in fact, deprecated. +type KubeOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + return prometheus.BuildFQName(namespace, subsystem, name) +} + +// StabilityLevel represents the API guarantees for a given defined metric. +type StabilityLevel string + +const ( + // ALPHA metrics have no stability guarantees, as such, labels may + // be arbitrarily added/removed and the metric may be deleted at any time. + ALPHA StabilityLevel = "ALPHA" + // STABLE metrics are guaranteed not be mutated and removal is governed by + // the deprecation policy outlined in by the control plane metrics stability KEP. + STABLE StabilityLevel = "STABLE" +) + +// setDefaults takes 'ALPHA' in case of empty. +func (sl *StabilityLevel) setDefaults() { + switch *sl { + case "": + *sl = ALPHA + default: + // no-op, since we have a StabilityLevel already + } +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts KubeOpts + +// Modify help description on the metric description. +func (o *CounterOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *CounterOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. 
This will do more once we have a proper label abstraction +func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts KubeOpts + +// Modify help description on the metric description. +func (o *GaugeOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *GaugeOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. +func (o *HistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *HistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + } +} + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. 
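+//
+// For example (hypothetical values): a summary declared with Help "Request
+// latency." and DeprecatedVersion "1.15.0" is exposed with the help text
+// "[ALPHA] (Deprecated since 1.15.0) Request latency." once it has been
+// marked deprecated and then annotated with its stability level.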
+func (o *SummaryOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *SummaryOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// Deprecated: DefObjectives will not be used as the default objectives in +// v1.0.0 of the library. The default Summary will have no quantiles then. +var ( + defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} +) + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { + // we need to retain existing quantile behavior for backwards compatibility, + // so let's do what prometheus used to do prior to v1. + objectives := o.Objectives + if objectives == nil { + objectives = defObjectives + } + return prometheus.SummaryOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Objectives: objectives, + MaxAge: o.MaxAge, + AgeBuckets: o.AgeBuckets, + BufCap: o.BufCap, + } +} + +type MetricLabelAllowList struct { + labelToAllowList map[string]sets.String +} + +func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, labelValueList []string) { + for index, value := range labelValueList { + name := labelNameList[index] + if allowValues, ok := allowList.labelToAllowList[name]; ok { + if !allowValues.Has(value) { + labelValueList[index] = "unexpected" + } + } + } +} + +func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]string) { + for name, value := range labels { + if allowValues, ok := allowList.labelToAllowList[name]; ok { + if !allowValues.Has(value) { + labels[name] = "unexpected" + } + } + } +} + +func SetLabelAllowListFromCLI(allowListMapping map[string]string) { + allowListLock.Lock() + defer allowListLock.Unlock() + for metricLabelName, labelValues := range allowListMapping { + metricName := strings.Split(metricLabelName, ",")[0] + labelName := strings.Split(metricLabelName, ",")[1] + valueSet := sets.NewString(strings.Split(labelValues, ",")...) + + allowList, ok := labelValueAllowLists[metricName] + if ok { + allowList.labelToAllowList[labelName] = valueSet + } else { + labelToAllowList := make(map[string]sets.String) + labelToAllowList[labelName] = valueSet + labelValueAllowLists[metricName] = &MetricLabelAllowList{ + labelToAllowList, + } + } + } +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go new file mode 100644 index 0000000000..4b5e76935c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+var processStartTime = NewGaugeVec(
+	&GaugeOpts{
+		Name:           "process_start_time_seconds",
+		Help:           "Start time of the process since unix epoch in seconds.",
+		StabilityLevel: ALPHA,
+	},
+	[]string{},
+)
+
+// RegisterProcessStartTime registers the process_start_time_seconds metric with
+// a prometheus registry. This metric needs to be included to ensure counter
+// data fidelity.
+func RegisterProcessStartTime(registrationFunc func(Registerable) error) error {
+	start, err := getProcessStart()
+	if err != nil {
+		klog.Errorf("Could not get process start time, %v", err)
+		start = float64(time.Now().Unix())
+	}
+	// processStartTime is a lazy metric which only gets initialized once it is
+	// registered, so we need to register the metric first and then set its value.
+	if err = registrationFunc(processStartTime); err != nil {
+		return err
+	}
+
+	processStartTime.WithLabelValues().Set(start)
+	return nil
+}
diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/vendor/k8s.io/component-base/metrics/processstarttime_others.go
new file mode 100644
index 0000000000..89ea8a68e8
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/processstarttime_others.go
@@ -0,0 +1,39 @@
+// +build !windows
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"os"
+
+	"github.com/prometheus/procfs"
+)
+
+func getProcessStart() (float64, error) {
+	pid := os.Getpid()
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		return 0, err
+	}
+
+	stat, err := p.Stat()
+	if err != nil {
+		return 0, err
+	}
+	return stat.StartTime()
+}
diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go
new file mode 100644
index 0000000000..8fcdf273a2
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package metrics + +import ( + "golang.org/x/sys/windows" +) + +func getProcessStart() (float64, error) { + processHandle := windows.CurrentProcess() + + var creationTime, exitTime, kernelTime, userTime windows.Filetime + if err := windows.GetProcessTimes(processHandle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil { + return 0, err + } + return float64(creationTime.Nanoseconds() / 1e9), nil +} diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go new file mode 100644 index 0000000000..b608fb568f --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -0,0 +1,337 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + apimachineryversion "k8s.io/apimachinery/pkg/version" + "k8s.io/component-base/version" +) + +var ( + showHiddenOnce sync.Once + disabledMetricsLock sync.RWMutex + showHidden atomic.Value + registries []*kubeRegistry // stores all registries created by NewKubeRegistry() + registriesLock sync.RWMutex + disabledMetrics = map[string]struct{}{} +) + +// shouldHide be used to check if a specific metric with deprecated version should be hidden +// according to metrics deprecation lifecycle. +func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool { + guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor)) + if err != nil { + panic("failed to make version from current version") + } + + if deprecatedVersion.LT(guardVersion) { + return true + } + + return false +} + +// ValidateShowHiddenMetricsVersion checks invalid version for which show hidden metrics. +func ValidateShowHiddenMetricsVersion(v string) []error { + err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v) + if err != nil { + return []error{err} + } + + return nil +} + +func SetDisabledMetric(name string) { + disabledMetricsLock.Lock() + defer disabledMetricsLock.Unlock() + disabledMetrics[name] = struct{}{} +} + +// SetShowHidden will enable showing hidden metrics. This will no-opt +// after the initial call +func SetShowHidden() { + showHiddenOnce.Do(func() { + showHidden.Store(true) + + // re-register collectors that has been hidden in phase of last registry. + for _, r := range registries { + r.enableHiddenCollectors() + r.enableHiddenStableCollectors() + } + }) +} + +// ShouldShowHidden returns whether showing hidden deprecated metrics +// is enabled. While the primary usecase for this is internal (to determine +// registration behavior) this can also be used to introspect +func ShouldShowHidden() bool { + return showHidden.Load() != nil && showHidden.Load().(bool) +} + +// Registerable is an interface for a collector metric which we +// will register with KubeRegistry. 
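+//
+// A registration sketch (the metric and registry names are hypothetical):
+//
+//	c := NewCounter(&CounterOpts{Name: "widgets_total", Help: "Widgets seen."})
+//	err := registry.Register(c) // calls c.Create(&version), which either
+//	                            // instantiates the prometheus.Counter or
+//	                            // tracks the metric as hidden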
+type Registerable interface { + prometheus.Collector + + // Create will mark deprecated state for the collector + Create(version *semver.Version) bool + + // ClearState will clear all the states marked by Create. + ClearState() + + // FQName returns the fully-qualified metric name of the collector. + FQName() string +} + +type resettable interface { + Reset() +} + +// KubeRegistry is an interface which implements a subset of prometheus.Registerer and +// prometheus.Gatherer interfaces +type KubeRegistry interface { + // Deprecated + RawMustRegister(...prometheus.Collector) + // CustomRegister is our internal variant of Prometheus registry.Register + CustomRegister(c StableCollector) error + // CustomMustRegister is our internal variant of Prometheus registry.MustRegister + CustomMustRegister(cs ...StableCollector) + // Register conforms to Prometheus registry.Register + Register(Registerable) error + // MustRegister conforms to Prometheus registry.MustRegister + MustRegister(...Registerable) + // Unregister conforms to Prometheus registry.Unregister + Unregister(collector Collector) bool + // Gather conforms to Prometheus gatherer.Gather + Gather() ([]*dto.MetricFamily, error) + // Reset invokes the Reset() function on all items in the registry + // which are added as resettables. + Reset() +} + +// kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization +// the kubernetes binary version information is loaded into the registry object, so that +// automatic behavior can be configured for metric versioning. +type kubeRegistry struct { + PromRegistry + version semver.Version + hiddenCollectors map[string]Registerable // stores all collectors that has been hidden + stableCollectors []StableCollector // stores all stable collector + hiddenCollectorsLock sync.RWMutex + stableCollectorsLock sync.RWMutex + resetLock sync.RWMutex + resettables []resettable +} + +// Register registers a new Collector to be included in metrics +// collection. It returns an error if the descriptors provided by the +// Collector are invalid or if they — in combination with descriptors of +// already registered Collectors — do not fulfill the consistency and +// uniqueness criteria described in the documentation of metric.Desc. +func (kr *kubeRegistry) Register(c Registerable) error { + if c.Create(&kr.version) { + defer kr.addResettable(c) + return kr.PromRegistry.Register(c) + } + + kr.trackHiddenCollector(c) + return nil +} + +// MustRegister works like Register but registers any number of +// Collectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) MustRegister(cs ...Registerable) { + metrics := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version) { + metrics = append(metrics, c) + kr.addResettable(c) + } else { + kr.trackHiddenCollector(c) + } + } + kr.PromRegistry.MustRegister(metrics...) +} + +// CustomRegister registers a new custom collector. +func (kr *kubeRegistry) CustomRegister(c StableCollector) error { + kr.trackStableCollectors(c) + defer kr.addResettable(c) + if c.Create(&kr.version, c) { + return kr.PromRegistry.Register(c) + } + return nil +} + +// CustomMustRegister works like CustomRegister but registers any number of +// StableCollectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) { + kr.trackStableCollectors(cs...) 
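+	// Tracking happens before Create so that collectors whose metrics are
+	// currently hidden can still be re-registered later by SetShowHidden.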
+	collectors := make([]prometheus.Collector, 0, len(cs))
+	for _, c := range cs {
+		if c.Create(&kr.version, c) {
+			kr.addResettable(c)
+			collectors = append(collectors, c)
+		}
+	}
+	kr.PromRegistry.MustRegister(collectors...)
+}
+
+// RawMustRegister takes a native prometheus.Collector and registers the collector
+// with the registry. This bypasses metrics safety checks, so should only be used
+// to register custom prometheus collectors.
+//
+// Deprecated
+func (kr *kubeRegistry) RawMustRegister(cs ...prometheus.Collector) {
+	kr.PromRegistry.MustRegister(cs...)
+	for _, c := range cs {
+		kr.addResettable(c)
+	}
+}
+
+// addResettable will automatically add our metric to our reset
+// list if it satisfies the interface
+func (kr *kubeRegistry) addResettable(i interface{}) {
+	kr.resetLock.Lock()
+	defer kr.resetLock.Unlock()
+	if resettable, ok := i.(resettable); ok {
+		kr.resettables = append(kr.resettables, resettable)
+	}
+}
+
+// Unregister unregisters the Collector that equals the Collector passed
+// in as an argument. (Two Collectors are considered equal if their
+// Describe method yields the same set of descriptors.) The function
+// returns whether a Collector was unregistered. Note that an unchecked
+// Collector cannot be unregistered (as its Describe method does not
+// yield any descriptor).
+func (kr *kubeRegistry) Unregister(collector Collector) bool {
+	return kr.PromRegistry.Unregister(collector)
+}
+
+// Gather calls the Collect method of the registered Collectors and then
+// gathers the collected metrics into a lexicographically sorted slice
+// of uniquely named MetricFamily protobufs. Gather ensures that the
+// returned slice is valid and self-consistent so that it can be used
+// for valid exposition. As an exception to the strict consistency
+// requirements described for metric.Desc, Gather will tolerate
+// different sets of label names for metrics of the same metric family.
+func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) {
+	return kr.PromRegistry.Gather()
+}
+
+// trackHiddenCollector stores all hidden collectors.
+func (kr *kubeRegistry) trackHiddenCollector(c Registerable) {
+	kr.hiddenCollectorsLock.Lock()
+	defer kr.hiddenCollectorsLock.Unlock()
+
+	kr.hiddenCollectors[c.FQName()] = c
+}
+
+// trackStableCollectors stores all custom collectors.
+func (kr *kubeRegistry) trackStableCollectors(cs ...StableCollector) {
+	kr.stableCollectorsLock.Lock()
+	defer kr.stableCollectorsLock.Unlock()
+
+	kr.stableCollectors = append(kr.stableCollectors, cs...)
+}
+
+// enableHiddenCollectors will re-register all of the hidden collectors.
+func (kr *kubeRegistry) enableHiddenCollectors() {
+	if len(kr.hiddenCollectors) == 0 {
+		return
+	}
+
+	kr.hiddenCollectorsLock.Lock()
+	cs := make([]Registerable, 0, len(kr.hiddenCollectors))
+
+	for _, c := range kr.hiddenCollectors {
+		c.ClearState()
+		cs = append(cs, c)
+	}
+
+	kr.hiddenCollectors = nil
+	kr.hiddenCollectorsLock.Unlock()
+	kr.MustRegister(cs...)
+}
+
+// enableHiddenStableCollectors will re-register the stable collectors if one or more of their metrics are hidden.
+// Since a metric cannot be registered twice, the collector has to be unregistered first and then registered again.
+func (kr *kubeRegistry) enableHiddenStableCollectors() {
+	if len(kr.stableCollectors) == 0 {
+		return
+	}
+
+	kr.stableCollectorsLock.Lock()
+
+	cs := make([]StableCollector, 0, len(kr.stableCollectors))
+	for _, c := range kr.stableCollectors {
+		if len(c.HiddenMetrics()) > 0 {
+			kr.Unregister(c) // unregistering must happen before clearing state, otherwise no metrics would be unregistered
+			c.ClearState()
+			cs = append(cs, c)
+		}
+	}
+
+	kr.stableCollectors = nil
+	kr.stableCollectorsLock.Unlock()
+	kr.CustomMustRegister(cs...)
+}
+
+// Reset invokes Reset on all metrics that are resettable.
+func (kr *kubeRegistry) Reset() {
+	kr.resetLock.RLock()
+	defer kr.resetLock.RUnlock()
+	for _, r := range kr.resettables {
+		r.Reset()
+	}
+}
+
+// BuildVersion is a helper function that can be easily mocked.
+var BuildVersion = version.Get
+
+func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry {
+	r := &kubeRegistry{
+		PromRegistry:     prometheus.NewRegistry(),
+		version:          parseVersion(v),
+		hiddenCollectors: make(map[string]Registerable),
+		resettables:      make([]resettable, 0),
+	}
+
+	registriesLock.Lock()
+	defer registriesLock.Unlock()
+	registries = append(registries, r)
+
+	return r
+}
+
+// NewKubeRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewKubeRegistry() KubeRegistry {
+	r := newKubeRegistry(BuildVersion())
+	return r
+}
diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go
new file mode 100644
index 0000000000..4732b02e30
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/summary.go
@@ -0,0 +1,214 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"github.com/blang/semver"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// Summary is our internal representation for our wrapping struct around prometheus
+// summaries. Summary implements both kubeCollector and ObserverMetric.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type Summary struct {
+	ObserverMetric
+	*SummaryOpts
+	lazyMetric
+	selfCollector
+}
+
+// NewSummary returns an object which is Summary-like. However, nothing
+// will be measured until the summary is registered somewhere.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummary(opts *SummaryOpts) *Summary {
+	opts.StabilityLevel.setDefaults()
+
+	s := &Summary{
+		SummaryOpts: opts,
+		lazyMetric:  lazyMetric{},
+	}
+	s.setPrometheusSummary(noopMetric{})
+	s.lazyInit(s, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return s
+}
+
+// setPrometheusSummary sets the underlying prometheus.Summary object, i.e. the thing that does the measurement.
+func (s *Summary) setPrometheusSummary(summary prometheus.Summary) {
+	s.ObserverMetric = summary
+	s.initSelfCollection(summary)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (s *Summary) DeprecatedVersion() *semver.Version {
+	return parseSemver(s.SummaryOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Summary object instantiation
+// and stores a reference to it
+func (s *Summary) initializeMetric() {
+	s.SummaryOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus summary.
+	s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation
+// but modifies the Help description prior to object instantiation.
+func (s *Summary) initializeDeprecatedMetric() {
+	s.SummaryOpts.markDeprecated()
+	s.initializeMetric()
+}
+
+// WithContext allows the normal Summary metric to pass in context. The context is currently a no-op.
+func (s *Summary) WithContext(ctx context.Context) ObserverMetric {
+	return s.ObserverMetric
+}
+
+// SummaryVec is the internal representation of our wrapping struct around prometheus
+// summaryVecs.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type SummaryVec struct {
+	*prometheus.SummaryVec
+	*SummaryOpts
+	lazyMetric
+	originalLabels []string
+}
+
+// NewSummaryVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.SummaryVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily instantiated.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+	allowListLock.RLock()
+	if allowList, ok := labelValueAllowLists[fqName]; ok {
+		opts.LabelValueAllowLists = allowList
+	}
+	allowListLock.RUnlock()
+
+	v := &SummaryVec{
+		SummaryOpts:    opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *SummaryVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.SummaryOpts.DeprecatedVersion)
+}
+
+func (v *SummaryVec) initializeMetric() {
+	v.SummaryOpts.annotateStabilityLevel()
+	v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels)
+}
+
+func (v *SummaryVec) initializeDeprecatedMetric() {
+	v.SummaryOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus behavior actually results in the creation of a new metric
+// if a metric with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/summary.go#L485-L495
+
+// WithLabelValues returns the ObserverMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec
+// has been registered to a metrics registry.
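+//
+// Unregistered vectors are safe to use: before registration this returns the
+// shared no-op ObserverMetric, so a call such as (names hypothetical)
+// latency.WithLabelValues("GET").Observe(0.1) silently records nothing.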
+func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.SummaryVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has +// been registered to a metrics registry. +func (v *SummaryVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.SummaryVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *SummaryVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.SummaryVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *SummaryVec) Reset() { + if !v.IsCreated() { + return + } + + v.SummaryVec.Reset() +} + +// WithContext returns wrapped SummaryVec with context +func (v *SummaryVec) WithContext(ctx context.Context) *SummaryVecWithContext { + return &SummaryVecWithContext{ + ctx: ctx, + SummaryVec: *v, + } +} + +// SummaryVecWithContext is the wrapper of SummaryVec with context. +type SummaryVecWithContext struct { + SummaryVec + ctx context.Context +} + +// WithLabelValues is the wrapper of SummaryVec.WithLabelValues. +func (vc *SummaryVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { + return vc.SummaryVec.WithLabelValues(lvs...) +} + +// With is the wrapper of SummaryVec.With. +func (vc *SummaryVecWithContext) With(labels map[string]string) ObserverMetric { + return vc.SummaryVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/value.go b/vendor/k8s.io/component-base/metrics/value.go new file mode 100644 index 0000000000..4a19aaa3bf --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/value.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. 
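+// They correspond one-to-one to the prometheus.ValueType values and are what
+// NewLazyConstMetric (below) expects when a custom StableCollector emits
+// constant metrics, e.g. in a hypothetical CollectWithStability body:
+//
+//	ch <- NewLazyConstMetric(desc, GaugeValue, 1, "labelvalue")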
+const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +func (vt *ValueType) toPromValueType() prometheus.ValueType { + return prometheus.ValueType(*vt) +} + +// NewLazyConstMetric is a helper of MustNewConstMetric. +// +// Note: If the metrics described by the desc is hidden, the metrics will not be created. +func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + if desc.IsHidden() { + return nil + } + return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...) +} + +// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp. +// +// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(), +// otherwise, no stability guarantees would be offered. +func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric { + if m == nil { + return nil + } + + return prometheus.NewMetricWithTimestamp(t, m) +} diff --git a/vendor/k8s.io/component-base/metrics/version.go b/vendor/k8s.io/component-base/metrics/version.go new file mode 100644 index 0000000000..f963e205eb --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/version.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "k8s.io/component-base/version" + +var ( + buildInfo = NewGaugeVec( + &GaugeOpts{ + Name: "kubernetes_build_info", + Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.", + StabilityLevel: ALPHA, + }, + []string{"major", "minor", "git_version", "git_commit", "git_tree_state", "build_date", "go_version", "compiler", "platform"}, + ) +) + +// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus +func RegisterBuildInfo(r KubeRegistry) { + info := version.Get() + r.MustRegister(buildInfo) + buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1) +} diff --git a/vendor/k8s.io/component-base/metrics/version_parser.go b/vendor/k8s.io/component-base/metrics/version_parser.go new file mode 100644 index 0000000000..d52bd74bdf --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/version_parser.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "fmt" + "regexp" + + "github.com/blang/semver" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +const ( + versionRegexpString = `^v(\d+\.\d+\.\d+)` +) + +var ( + versionRe = regexp.MustCompile(versionRegexpString) +) + +func parseSemver(s string) *semver.Version { + if s != "" { + sv := semver.MustParse(s) + return &sv + } + return nil +} +func parseVersion(ver apimachineryversion.Info) semver.Version { + matches := versionRe.FindAllStringSubmatch(ver.String(), -1) + + if len(matches) != 1 { + panic(fmt.Sprintf("version string \"%v\" doesn't match expected regular expression: \"%v\"", ver.String(), versionRe.String())) + } + return semver.MustParse(matches[0][1]) +} diff --git a/vendor/k8s.io/component-base/metrics/wrappers.go b/vendor/k8s.io/component-base/metrics/wrappers.go new file mode 100644 index 0000000000..6ae8a458ac --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/wrappers.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// This file contains a series of interfaces which we explicitly define for +// integrating with prometheus. We redefine the interfaces explicitly here +// so that we can prevent breakage if methods are ever added to prometheus +// variants of them. + +// Collector defines a subset of prometheus.Collector interface methods +type Collector interface { + Describe(chan<- *prometheus.Desc) + Collect(chan<- prometheus.Metric) +} + +// Metric defines a subset of prometheus.Metric interface methods +type Metric interface { + Desc() *prometheus.Desc + Write(*dto.Metric) error +} + +// CounterMetric is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. + +// CounterMetric is an interface which defines a subset of the interface provided by prometheus.Counter +type CounterMetric interface { + Inc() + Add(float64) +} + +// CounterVecMetric is an interface which prometheus.CounterVec satisfies. +type CounterVecMetric interface { + WithLabelValues(...string) CounterMetric + With(prometheus.Labels) CounterMetric +} + +// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge +type GaugeMetric interface { + Set(float64) + Inc() + Dec() + Add(float64) + Write(out *dto.Metric) error + SetToCurrentTime() +} + +// ObserverMetric captures individual observations. 
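+//
+// Both Histogram/HistogramVec and Summary/SummaryVec hand out values that
+// satisfy it, so instrumentation code can stay agnostic about which of the
+// two distribution types sits behind a given Observe call.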
diff --git a/vendor/k8s.io/component-base/metrics/wrappers.go b/vendor/k8s.io/component-base/metrics/wrappers.go
new file mode 100644
index 0000000000..6ae8a458ac
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/wrappers.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+)
+
+// This file contains a series of interfaces which we explicitly define for
+// integrating with prometheus. We redefine the interfaces explicitly here
+// so that we can prevent breakage if methods are ever added to prometheus
+// variants of them.
+
+// Collector defines a subset of prometheus.Collector interface methods
+type Collector interface {
+	Describe(chan<- *prometheus.Desc)
+	Collect(chan<- prometheus.Metric)
+}
+
+// Metric defines a subset of prometheus.Metric interface methods
+type Metric interface {
+	Desc() *prometheus.Desc
+	Write(*dto.Metric) error
+}
+
+// CounterMetric is an interface which defines a subset of the interface
+// provided by prometheus.Counter. It represents a single numerical value
+// that only ever goes up, so it cannot be used to count items whose number
+// can also go down, e.g. the number of currently running goroutines; those
+// "counters" are represented by Gauges.
+type CounterMetric interface {
+	Inc()
+	Add(float64)
+}
+
+// CounterVecMetric is an interface which prometheus.CounterVec satisfies.
+type CounterVecMetric interface {
+	WithLabelValues(...string) CounterMetric
+	With(prometheus.Labels) CounterMetric
+}
+
+// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge
+type GaugeMetric interface {
+	Set(float64)
+	Inc()
+	Dec()
+	Add(float64)
+	Write(out *dto.Metric) error
+	SetToCurrentTime()
+}
+
+// ObserverMetric captures individual observations.
+type ObserverMetric interface {
+	Observe(float64)
+}
+
+// PromRegistry is an interface which defines a subset of the prometheus.Registerer
+// and prometheus.Gatherer interfaces.
+type PromRegistry interface {
+	Register(prometheus.Collector) error
+	MustRegister(...prometheus.Collector)
+	Unregister(prometheus.Collector) bool
+	Gather() ([]*dto.MetricFamily, error)
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies.
+type Gatherer interface {
+	prometheus.Gatherer
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+	Metric
+	Collector
+}
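One practical payoff of these narrowed interfaces, sketched with a made-up fakeCounter and recordRetries: code written against CounterMetric can be exercised in tests without a real prometheus.Counter. The local CounterMetric here just mirrors the definition above so the snippet stands alone.

package main

import "fmt"

// CounterMetric mirrors the interface defined in wrappers.go.
type CounterMetric interface {
	Inc()
	Add(float64)
}

// fakeCounter is a test double; only Inc and Add are needed.
type fakeCounter struct{ n float64 }

func (f *fakeCounter) Inc()          { f.n++ }
func (f *fakeCounter) Add(v float64) { f.n += v }

// recordRetries depends on the narrow interface, not on a concrete
// prometheus type, so both real counters and fakes satisfy it.
func recordRetries(c CounterMetric, retries int) {
	c.Add(float64(retries))
}

func main() {
	c := &fakeCounter{}
	recordRetries(c, 3)
	c.Inc()
	fmt.Println(c.n) // 4
}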
diff --git a/vendor/k8s.io/component-base/version/.gitattributes b/vendor/k8s.io/component-base/version/.gitattributes
new file mode 100644
index 0000000000..7e349eff60
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/.gitattributes
@@ -0,0 +1 @@
+base.go export-subst
diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go
new file mode 100644
index 0000000000..b753b7d191
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/base.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Base version information.
+//
+// This is the fallback data used when version information from git is not
+// provided via go ldflags. It provides an approximation of the Kubernetes
+// version for ad-hoc builds (e.g. `go build`) that cannot get the version
+// information from git.
+//
+// If you are looking at these fields in the git tree, they look
+// strange. They are modified on the fly by the build process. The
+// in-tree values are dummy values used for "git archive", which also
+// works for GitHub tar downloads.
+//
+// When releasing a new Kubernetes version, this file is updated by
+// build/mark_new_version.sh to reflect the new version, and then a
+// git annotated tag (using format vX.Y where X == Major version and Y
+// == Minor version) is created to point to the commit that updates
+// component-base/version/base.go
+var (
+	// TODO: Deprecate gitMajor and gitMinor, use only gitVersion
+	// instead. First step in deprecation, keep the fields but make
+	// them irrelevant. (Next we'll take it out, which may muck with
+	// scripts consuming the kubectl version output - but most of
+	// these should be looking at gitVersion already anyway.)
+	gitMajor string // major version, always numeric
+	gitMinor string // minor version, numeric possibly followed by "+"
+
+	// semantic version, derived by build scripts (see
+	// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md
+	// for a detailed discussion of this field)
+	//
+	// TODO: This field is still called "gitVersion" for legacy
+	// reasons. For prerelease versions, the build metadata on the
+	// semantic version is a git hash, but the version itself is no
+	// longer the direct output of "git describe", but a slight
+	// translation to be semver compliant.
+
+	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
+	// companion .gitattributes file containing 'export-subst' in this same
+	// directory. See also https://git-scm.com/docs/gitattributes
+	gitVersion   = "v0.0.0-master+$Format:%H$"
+	gitCommit    = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
+	gitTreeState = ""            // state of git tree, either "clean" or "dirty"
+
+	buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
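To make the fallback behavior concrete, a small program that prints what Get (defined in version.go just below) returns. The -ldflags line in the comment is the usual way release builds override the dummy values above; the exact flag values shown are illustrative.

package main

import (
	"fmt"

	"k8s.io/component-base/version"
)

// With a plain `go build`, the fields below come from the fallback values in
// base.go (gitVersion "v0.0.0-master+$Format:%H$", buildDate 1970-01-01, ...).
// Release builds overwrite the unexported vars at link time, e.g.:
//
//	go build -ldflags "-X k8s.io/component-base/version.gitVersion=v1.21.2 \
//	    -X k8s.io/component-base/version.gitCommit=$(git rev-parse HEAD)"
func main() {
	info := version.Get()
	fmt.Printf("%s (commit %q, tree %q, built %s)\n",
		info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate)
}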
diff --git a/vendor/k8s.io/component-base/version/version.go b/vendor/k8s.io/component-base/version/version.go
new file mode 100644
index 0000000000..d1e76dc00e
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/version.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"fmt"
+	"runtime"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+// Get returns the overall codebase version. It's for detecting
+// what code a binary was built from.
+func Get() apimachineryversion.Info {
+	// These variables typically come from -ldflags settings and in
+	// their absence fallback to the settings in ./base.go
+	return apimachineryversion.Info{
+		Major:        gitMajor,
+		Minor:        gitMinor,
+		GitVersion:   gitVersion,
+		GitCommit:    gitCommit,
+		GitTreeState: gitTreeState,
+		BuildDate:    buildDate,
+		GoVersion:    runtime.Version(),
+		Compiler:     runtime.Compiler,
+		Platform:     fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+	}
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go
index 9639fce29a..69d42622c8 100644
--- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go
@@ -925,10 +925,7 @@ func (m *APIService) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1139,10 +1136,7 @@ func (m *APIServiceCondition) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1259,10 +1253,7 @@ func (m *APIServiceList) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1504,10 +1495,7 @@ func (m *APIServiceSpec) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1591,10 +1579,7 @@ func (m *APIServiceStatus) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1728,10 +1713,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go
index 7aff99be17..4a9ff15faa 100644
--- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go
@@ -925,10 +925,7 @@ func (m *APIService) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1139,10 +1136,7 @@ func (m *APIServiceCondition) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1259,10 +1253,7 @@ func (m *APIServiceList) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1504,10 +1495,7 @@ func (m *APIServiceSpec) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1591,10 +1579,7 @@ func (m *APIServiceStatus) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
@@ -1728,10 +1713,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if (iNdEx + skippy) > l {
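The regenerated unmarshal code above only folds two branches with identical bodies into one condition; the second clause doubles as an overflow guard. A standalone restatement of that check, with a hypothetical helper and made-up values (math.MaxInt requires Go 1.17+):

package main

import (
	"errors"
	"fmt"
	"math"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// checkSkip restates the consolidated bounds check: a negative skip length
// and an iNdEx+skippy sum that wraps into the negatives are both invalid,
// so the two former branches share one condition and one error.
func checkSkip(iNdEx, skippy, l int) error {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return errInvalidLength
	}
	if iNdEx+skippy > l {
		return errors.New("unexpected EOF")
	}
	return nil
}

func main() {
	fmt.Println(checkSkip(10, -3, 100))         // negative skippy
	fmt.Println(checkSkip(1, math.MaxInt, 100)) // sum wraps negative
	fmt.Println(checkSkip(10, 20, 100))         // <nil>
}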
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2936f0cb33..4025d387f9 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -9,6 +9,8 @@ github.com/PuerkitoBio/urlesc
 github.com/asaskevich/govalidator
 # github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
+# github.com/blang/semver v3.5.1+incompatible
+github.com/blang/semver
 # github.com/blang/semver/v4 v4.0.0
 github.com/blang/semver/v4
 # github.com/cespare/xxhash/v2 v2.1.1
@@ -65,6 +67,8 @@ github.com/go-stack/stack
 ## explicit
 github.com/gogo/protobuf/proto
 github.com/gogo/protobuf/sortkeys
+# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
+github.com/golang/groupcache/lru
 # github.com/golang/protobuf v1.5.0
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
@@ -118,12 +122,13 @@ github.com/modern-go/reflect2
 github.com/mwitkow/go-conntrack
 # github.com/opencontainers/go-digest v1.0.0
 github.com/opencontainers/go-digest
-# github.com/openshift/api v0.0.0-20210225162315-bae60f47eed7
+# github.com/openshift/api v0.0.0-20210706092853-b63d499a70ce
 ## explicit
 github.com/openshift/api/config/v1
+github.com/openshift/api/operator/v1
 github.com/openshift/api/route/v1
 github.com/openshift/api/security/v1
-# github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49
+# github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
 ## explicit
 github.com/openshift/client-go/config/clientset/versioned
 github.com/openshift/client-go/config/clientset/versioned/scheme
@@ -136,9 +141,14 @@ github.com/openshift/client-go/security/clientset/versioned/fake
 github.com/openshift/client-go/security/clientset/versioned/scheme
 github.com/openshift/client-go/security/clientset/versioned/typed/security/v1
 github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake
-# github.com/openshift/library-go v0.0.0-20210113192829-cfbb3f4c80c2
+# github.com/openshift/library-go v0.0.0-20210720093535-f8ed43828870
 ## explicit
+github.com/openshift/library-go/pkg/controller/factory
 github.com/openshift/library-go/pkg/crypto
+github.com/openshift/library-go/pkg/operator/csr
+github.com/openshift/library-go/pkg/operator/events
+github.com/openshift/library-go/pkg/operator/management
+github.com/openshift/library-go/pkg/operator/v1helpers
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
@@ -219,6 +229,8 @@ github.com/prometheus/prometheus/tsdb/errors
 github.com/prometheus/prometheus/tsdb/fileutil
 github.com/prometheus/prometheus/tsdb/tsdbutil
 github.com/prometheus/prometheus/util/strutil
+# github.com/robfig/cron v1.2.0
+github.com/robfig/cron
 # github.com/spf13/pflag v1.0.5
 github.com/spf13/pflag
 # go.mongodb.org/mongo-driver v1.4.6
@@ -365,9 +377,11 @@ k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
 k8s.io/apimachinery/pkg/api/resource
+k8s.io/apimachinery/pkg/api/validation
 k8s.io/apimachinery/pkg/apis/meta/internalversion
 k8s.io/apimachinery/pkg/apis/meta/v1
 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
+k8s.io/apimachinery/pkg/apis/meta/v1/validation
 k8s.io/apimachinery/pkg/apis/meta/v1beta1
 k8s.io/apimachinery/pkg/conversion
 k8s.io/apimachinery/pkg/conversion/queryparams
@@ -411,6 +425,7 @@ k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
 # k8s.io/apiserver v0.21.2
 ## explicit
+k8s.io/apiserver/pkg/authentication/serviceaccount
 k8s.io/apiserver/pkg/authentication/user
 # k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.21.2
 ## explicit
@@ -661,6 +676,8 @@ k8s.io/client-go/tools/clientcmd/api/latest
 k8s.io/client-go/tools/clientcmd/api/v1
 k8s.io/client-go/tools/metrics
 k8s.io/client-go/tools/pager
+k8s.io/client-go/tools/record
+k8s.io/client-go/tools/record/util
 k8s.io/client-go/tools/reference
 k8s.io/client-go/tools/remotecommand
 k8s.io/client-go/transport
@@ -675,10 +692,13 @@ k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
 # k8s.io/component-base v0.21.2
 k8s.io/component-base/cli/flag
+k8s.io/component-base/metrics
+k8s.io/component-base/metrics/legacyregistry
+k8s.io/component-base/version
 # k8s.io/klog/v2 v2.8.0
 ## explicit
 k8s.io/klog/v2
-# k8s.io/kube-aggregator v0.20.0
+# k8s.io/kube-aggregator v0.21.1
 ## explicit
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1